import os

import gradio as gr
import pandas as pd
import requests
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"


# --- Agent Definition ---
class BasicAgent:
    def __init__(self):
        # Change this model to one you have access to
        model_name = "Qwen/Qwen3-0.6B-MLX-bf16"
        print(f"Loading model {model_name}")

        # Load tokenizer and model
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            device_map="auto",
        )

        # Create the generation pipeline (greedy decoding; `temperature` is
        # ignored when do_sample=False, so it is not set here)
        self.generator = pipeline(
            "text-generation",
            model=self.model,
            tokenizer=self.tokenizer,
            max_new_tokens=100,
            do_sample=False,
        )

    def __call__(self, question: str) -> str:
        print("Question:", question)
        prompt = question.strip()
        output = self.generator(prompt)[0]["generated_text"]

        # Remove the prompt prefix so only the answer remains
        if output.startswith(prompt):
            answer = output[len(prompt):].strip()
        else:
            answer = output.strip()

        # Keep only the first line if the model produced several
        answer = answer.split("\n")[0].strip()

        # Optionally strip trailing punctuation
        answer = answer.rstrip(" .,:;!?")
        print("Answer:", answer)
        return answer
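
# Optional: a minimal local smoke test for BasicAgent (a sketch added for
# convenience; it is not called anywhere by the app and is not part of the
# evaluation flow). The helper name and the example question are illustrative.
def _local_smoke_test() -> None:
    agent = BasicAgent()
    print(agent("What is the capital of France?"))
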
def run_and_submit_all(profile: gr.OAuthProfile | None):
    space_id = os.getenv("SPACE_ID")

    if not profile:
        return "Please Login to Hugging Face with the button.", None
    username = profile.username
    print("User:", username)

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    try:
        agent = BasicAgent()
    except Exception as e:
        return f"Error initializing agent: {e}", None
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

    # Fetch questions
    try:
        resp = requests.get(questions_url, timeout=15)
        resp.raise_for_status()
        questions_data = resp.json()
    except Exception as e:
        return f"Error fetching questions: {e}", None

    results_log = []
    answers_payload = []
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue
        try:
            ans = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": ans})
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Submitted Answer": ans,
            })
        except Exception as e:
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Submitted Answer": f"ERROR: {e}",
            })

    if not answers_payload:
        return "Agent did not produce any answers.", pd.DataFrame(results_log)

    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload,
    }
    try:
        post_resp = requests.post(submit_url, json=submission_data, timeout=60)
        post_resp.raise_for_status()
        result = post_resp.json()
        status_text = (
            f"Submission Successful!\n"
            f"User: {result.get('username')}\n"
            f"Overall Score: {result.get('score', 'N/A')}% "
            f"({result.get('correct_count', '?')}/{result.get('total_attempted', '?')} correct)\n"
            f"Message: {result.get('message', '')}"
        )
        return status_text, pd.DataFrame(results_log)
    except Exception as e:
        return f"Submission Failed: {e}", pd.DataFrame(results_log)

# --- Gradio Interface ---
with gr.Blocks() as demo:
    gr.Markdown("# Agent Evaluation Runner")
    gr.Markdown(
        """
        1. Login with Hugging Face
        2. Click “Run Evaluation & Submit All Answers”
        3. Wait for the score and review your answers
        """
    )
    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_out = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # No explicit inputs: Gradio injects the OAuth profile because
    # run_and_submit_all takes a gr.OAuthProfile parameter.
    run_button.click(fn=run_and_submit_all, outputs=[status_out, results_table])

if __name__ == "__main__":
    demo.launch(debug=True, share=False)
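
# Usage note (an assumption for local runs, not needed on a Space): the
# SPACE_ID environment variable is set automatically inside a Hugging Face
# Space. When running this file locally, exporting it manually, e.g.
#   SPACE_ID=<your-username>/<your-space> python app.py
# keeps the agent_code link above pointing at a real repository.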