Commit c550f81 (parent 2dd438d): Remove dropdown

app.py CHANGED
@@ -23,6 +23,15 @@ login(token=HF_LE_LLM_READ_TOKEN)
 DEFAULT_MODEL = "le-llm/manipulative-score-model"
 DEVICE = "cuda"

+MODEL_OPTIONS = [
+    "le-llm/manipulative-score-model",
+    "le-llm/gec-score-model",
+    "le-llm/fineweb-mixtral-edu-score",
+    "le-llm/fineweb-nemotron-edu-score",
+    "le-llm/alignment-score-model",
+    "le-llm/fasttext-quality-score",
+
+]

 # --- Cache to avoid repeated reloads ---
 _model_cache: Dict[str, tuple[torch.nn.Module, AutoTokenizer]] = {}
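The list itself is unchanged; it only moves up from the UI section (removed in the next hunk) into the config block next to DEFAULT_MODEL. Functionally either placement works, because Python resolves module-level names when `bot` is called, not when it is defined; a minimal sketch of that lookup rule, with hypothetical names:

# Hypothetical demo: NAMES is resolved when f() runs,
# so defining it after f would still work.
def f():
    return NAMES

NAMES = ["a", "b"]
print(f())  # ['a', 'b']

The move is therefore about readability: `bot` now depends on MODEL_OPTIONS, so the list sits with the other module constants.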
@@ -63,29 +72,23 @@ def compute_score(text: str, model: torch.nn.Module, tokenizer: AutoTokenizer) -

 # --- Main scoring logic ---
 @spaces.GPU
-def bot(user_message: str, history: list[dict[str, Any]], model_choice: str):
+def bot(user_message: str, history: list[dict[str, Any]]):
     if not user_message.strip():
         return "", history

-
+    res = ""
     history = history + [{"role": "user", "content": user_message}]
-
+    for model_choice in MODEL_OPTIONS:
+        model, tokenizer = load_model(model_choice)  # returns embedding model
+        score = compute_score(user_message, model, tokenizer)["score"]
+        res += f"{model_choice}: {score}\n"

-    history.append({"role": "assistant", "content":
+    history.append({"role": "assistant", "content": res.strip()})
     return "", history

 # --- UI ---
 THEME = gr.themes.Soft(primary_hue="blue", secondary_hue="amber", neutral_hue="stone")

-MODEL_OPTIONS = [
-    "le-llm/manipulative-score-model",
-    "le-llm/gec-score-model",
-    "le-llm/fineweb-mixtral-edu-score",
-    "le-llm/fineweb-nemotron-edu-score",
-    "le-llm/alignment-score-model",
-    "le-llm/fasttext-quality-score",
-
-]

 def _clear_chat():
     return "", []
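Both hunks call helpers the diff never shows: `load_model` (backed by the `_model_cache` dict declared at the top of the file) and `compute_score`. A minimal sketch of what a cache-backed loader of this shape could look like, assuming a sequence-classification head; the actual helper in app.py may differ:

# Hedged sketch of a load_model helper consistent with the diff's usage.
# AutoModelForSequenceClassification and the .eval() call are assumptions.
from typing import Dict

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

DEVICE = "cuda"
_model_cache: Dict[str, tuple[torch.nn.Module, AutoTokenizer]] = {}

def load_model(model_name: str) -> tuple[torch.nn.Module, AutoTokenizer]:
    # Reuse a cached instance so that scoring one message against all six
    # models does not reload weights on every call.
    if model_name not in _model_cache:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForSequenceClassification.from_pretrained(model_name)
        _model_cache[model_name] = (model.to(DEVICE).eval(), tokenizer)
    return _model_cache[model_name]

With the dropdown gone, this cache matters more: the new `bot` touches all six models per message instead of one.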
@@ -94,14 +97,11 @@ def _clear_chat():
 with gr.Blocks(theme=THEME, fill_height=True) as demo:
     gr.Markdown("### 🤔 LAPA Quality Estimation")

-    with gr.Row():
-        model_choice = gr.Dropdown(MODEL_OPTIONS, value=DEFAULT_MODEL, label="Select Model")
-
     chatbot = gr.Chatbot(type="messages", height=480)
     msg = gr.Textbox(label=None, placeholder="Type your text…", lines=1)
     clear_btn = gr.Button("Clear")

-    msg.submit(bot, inputs=[msg, chatbot, model_choice], outputs=[msg, chatbot])
+    msg.submit(bot, inputs=[msg, chatbot], outputs=[msg, chatbot])
     clear_btn.click(_clear_chat, outputs=[msg, chatbot])

 if __name__ == "__main__":
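Net effect of the commit: rather than scoring against a single dropdown-selected model, each submitted message is scored by every entry in MODEL_OPTIONS, and the results come back as one aggregated assistant message. Roughly, the pair the new `bot` returns looks like this; the score values are invented for illustration:

# Illustrative return value of the new bot() for a single message.
# Scores are made up; real ones come from compute_score(...)["score"].
cleared_box, history = "", [
    {"role": "user", "content": "Example input text"},
    {
        "role": "assistant",
        "content": (
            "le-llm/manipulative-score-model: 0.12\n"
            "le-llm/gec-score-model: 0.87\n"
            "le-llm/fineweb-mixtral-edu-score: 2.31\n"
            "le-llm/fineweb-nemotron-edu-score: 1.95\n"
            "le-llm/alignment-score-model: 0.44\n"
            "le-llm/fasttext-quality-score: 0.68"
        ),
    },
]

The empty first element clears the textbox, matching the outputs=[msg, chatbot] wiring.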