Yurii Paniv committed · commit c14a5b1 · parent(s): 5010b53
Add logging and print statements for debugging
app.py CHANGED

@@ -29,6 +29,7 @@ def check_thread(logging_queue: Queue):
     print("Logging thread started.")
     print(f"Logging to '{getenv('OUTPUT_DATASET')}'")
     while True:
+        print("Checking for logs...")
         sleep(60)
         batch = []
         while not logging_queue.empty():
@@ -72,7 +73,6 @@ model = PeftModel.from_pretrained(model, "lang-uk/dragoman").to("cuda")
 tokenizer = AutoTokenizer.from_pretrained(
     "mistralai/Mistral-7B-v0.1", use_fast=False, add_bos_token=False
 )
-tokenizer.pad_token_id = tokenizer.eos_token_id


 @spaces.GPU(duration=30)
@@ -88,7 +88,7 @@ def translate(input_text):
     inputs = tokenizer([input_text], return_tensors="pt").to(model.device)

     generation_kwargs = dict(
-        inputs, max_new_tokens=200, num_beams=10, temperature=1
+        inputs, max_new_tokens=200, num_beams=10, temperature=1, pad_token_id=tokenizer.eos_token_id
     )  # streamer=streamer,

     # streaming support
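For context, the first hunk adds a debug print inside the background logging loop. Below is a minimal sketch of that polling pattern; it assumes the rest of the app enqueues records onto logging_queue elsewhere and that each drained batch is eventually uploaded to the dataset named by OUTPUT_DATASET (the upload step is replaced by a placeholder print here).

from os import getenv
from queue import Queue
from threading import Thread
from time import sleep

def check_thread(logging_queue: Queue):
    print("Logging thread started.")
    print(f"Logging to '{getenv('OUTPUT_DATASET')}'")
    while True:
        print("Checking for logs...")  # debug line added in this commit
        sleep(60)  # poll once a minute
        batch = []
        while not logging_queue.empty():  # drain everything queued since the last pass
            batch.append(logging_queue.get())
        if batch:
            # placeholder: the real Space presumably pushes this batch to OUTPUT_DATASET
            print(f"Collected {len(batch)} log records")

logging_queue = Queue()
Thread(target=check_thread, args=(logging_queue,), daemon=True).start()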
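The other two hunks replace the module-level tokenizer.pad_token_id assignment with a pad_token_id entry in the per-call generation kwargs. A minimal sketch of that pattern with the transformers generate API, assuming the kwargs are ultimately unpacked into model.generate() (the commented-out streamer suggests the Space streams output, which is omitted here):

# Sketch only: the real Space loads Mistral-7B with the lang-uk/dragoman PEFT adapter on GPU;
# the kwargs pattern is the same for any causal LM.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "mistralai/Mistral-7B-v0.1"  # as in the diff
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, add_bos_token=False)
model = AutoModelForCausalLM.from_pretrained(model_name)

input_text = "Hello, world."  # example input only
inputs = tokenizer([input_text], return_tensors="pt").to(model.device)

generation_kwargs = dict(
    inputs,  # BatchEncoding is dict-like, so this carries input_ids and attention_mask
    max_new_tokens=200,
    num_beams=10,
    temperature=1,
    pad_token_id=tokenizer.eos_token_id,  # set per call instead of mutating the tokenizer
)
output_ids = model.generate(**generation_kwargs)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

Passing pad_token_id per call keeps the tokenizer untouched while still giving generate() a pad token for open-ended generation.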