from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
import json

# Load the tokenizer and model from the local directory once at import time,
# so that warm invocations of the handler reuse the same objects.
model_path = "./"

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
model.eval()
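
# Optional sketch, not part of the original handler: if the runtime exposes a
# GPU, the model could be pinned to it (the tokenized inputs in handle() would
# then need .to(device) as well). Left commented out to preserve the original
# CPU-only behavior.
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model.to(device)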


def handle(event, context):
    """
    event: dict with a "text" key containing the text to process
    context: platform-specific context object (unused here)

    Returns a dict with the generated text.
    """
    # The event may arrive either as a JSON string or as an already-parsed dict.
    try:
        if isinstance(event, str):
            event = json.loads(event)
        text = event.get("text", "")
    except Exception as e:
        return {"error": f"Failed to parse input: {str(e)}"}

    if not text:
        return {"error": "No text to process."}

    # Tokenize and run generation without gradient tracking; generate() is
    # called with library defaults (no explicit length or sampling settings).
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(**inputs)

    result = tokenizer.decode(outputs[0], skip_special_tokens=True)

    return {"generated_text": result}
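

# Minimal local smoke test (an assumption, not part of any deployment wiring):
# invokes the handler directly with a JSON-encoded event, the way a platform
# would. Assumes the model files referenced by model_path are present locally.
if __name__ == "__main__":
    sample_event = json.dumps({"text": "Hello, world!"})
    print(handle(sample_event, context=None))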