Update app.py
Browse files
app.py
CHANGED
|
@@ -213,7 +213,7 @@ class Translators:
|
|
| 213 |
def salamandratapipe(self):
|
| 214 |
pipe = pipeline("text-generation", model=self.model_name)
|
| 215 |
messages = [{"role": "user", "content": f"Translate the following text from {self.sl} into {self.tl}.\n{self.sl}: {self.input_text} \n{self.tl}:"}]
|
| 216 |
-
return pipe(messages)[0]["generated_text"][1]["content"]
|
| 217 |
|
| 218 |
def salamandrata(self):
|
| 219 |
text = f"Translate the following text from {self.sl} into {self.tl}.\n{self.sl}: {self.input_text} \n{self.tl}:"
|
|
@@ -221,7 +221,7 @@ class Translators:
|
|
| 221 |
model = AutoModelForCausalLM.from_pretrained(
|
| 222 |
self.model_name,
|
| 223 |
device_map="auto",
|
| 224 |
-
|
| 225 |
)
|
| 226 |
message = [{"role": "user", "content": text}]
|
| 227 |
# from datetime import datetime
|
|
|
|
| 213 |
    # Translate self.input_text from self.sl into self.tl via a Hugging Face
    # text-generation pipeline. NOTE(review): the pipeline (and thus the model)
    # is rebuilt on every call — presumably intentional for a demo app; confirm,
    # since reloading per request is expensive.
    def salamandratapipe(self):
|
| 214 |
    pipe = pipeline("text-generation", model=self.model_name)
|
| 215 |
    messages = [{"role": "user", "content": f"Translate the following text from {self.sl} into {self.tl}.\n{self.sl}: {self.input_text} \n{self.tl}:"}]
|
| 216 |
+
    # Changed by this commit: adds beam search (num_beams=5, early_stopping=True)
    # and caps output at 512 new tokens. The [0]["generated_text"][1]["content"]
    # indexing assumes chat-style pipeline output where element 1 of the returned
    # message list is the assistant turn — TODO confirm for this model/transformers
    # version, as plain (non-chat) inputs return a string here instead.
    return pipe(messages, max_new_tokens=512, early_stopping=True, num_beams=5)[0]["generated_text"][1]["content"]
|
| 217 |
|
| 218 |
def salamandrata(self):
|
| 219 |
text = f"Translate the following text from {self.sl} into {self.tl}.\n{self.sl}: {self.input_text} \n{self.tl}:"
|
|
|
|
| 221 |
model = AutoModelForCausalLM.from_pretrained(
|
| 222 |
self.model_name,
|
| 223 |
device_map="auto",
|
| 224 |
+
dtype=torch.bfloat16
|
| 225 |
)
|
| 226 |
message = [{"role": "user", "content": text}]
|
| 227 |
# from datetime import datetime
|