Files changed (2) hide show
  1. README.md +2 -6
  2. app.py +16 -25
README.md CHANGED
@@ -4,15 +4,11 @@ emoji: 🫡
4
  colorFrom: blue
5
  colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 4.26.0
8
  app_file: app.py
9
- pinned: false
10
  license: mit
11
  short_description: Command-R is optimized for conversational interaction.
12
  ---
13
 
14
- # Changes to README.md
15
-
16
- - 4/10/2024 update to rerank
17
-
18
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
4
  colorFrom: blue
5
  colorTo: indigo
6
  sdk: gradio
7
+ sdk_version: 4.22.0
8
  app_file: app.py
9
+ pinned: true
10
  license: mit
11
  short_description: Command-R is optimized for conversational interaction.
12
  ---
13
 
 
 
 
 
14
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -6,7 +6,7 @@ import torch
6
 
7
  title = """
8
  # Welcome to 🌟Tonic's🫡Command-R
9
- 🫡Command-R is a Large Language Model optimized for conversational interaction and long context tasks. It targets the “scalable” category of models that balance high performance with strong accuracy, enabling companies to move beyond proof of concept, and into production. 🫡Command-R boasts high precision on retrieval augmented generation (RAG) and tool use tasks, low latency and high throughput, a long 128k context, and strong capabilities across 10 key languages. You can build with this endpoint using🫡Command-R available here : [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01). You can also use 🫡Command-R by cloning this space. Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/Command-R?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
10
  Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface:[MultiTransformer](https://huggingface.co/MultiTransformer) Math 🔍 [introspector](https://huggingface.co/introspector) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [Torchon](https://github.com/Tonic-AI/Torchon)🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
11
  """
12
 
@@ -20,34 +20,25 @@ model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_c
20
  def generate_response(user_input, max_new_tokens, temperature):
21
  messages = [{"role": "user", "content": user_input}]
22
  input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
23
- input_ids = input_ids.to(model.device)
 
24
  gen_tokens = model.generate(
25
- input_ids = input_ids,
26
- max_new_tokens=max_new_tokens,
27
  do_sample=True,
28
  temperature=temperature,
29
  )
30
 
31
- gen_text = tokenizer.decode(gen_tokens[0], skip_special_tokens=True)
32
- if gen_text.startswith(user_input):
33
- gen_text = gen_text[len(user_input):].lstrip()
34
-
35
  return gen_text
36
 
37
 
38
-
39
  examples = [
40
  {"message": "What is the weather like today?", "max_new_tokens": 250, "temperature": 0.5},
41
  {"message": "Tell me a joke.", "max_new_tokens": 650, "temperature": 0.7},
42
  {"message": "Explain the concept of machine learning.", "max_new_tokens": 980, "temperature": 0.4}
43
  ]
44
- example_choices = [f"Example {i+1}" for i in range(len(examples))]
45
-
46
- def load_example(choice):
47
- index = example_choices.index(choice)
48
- example = examples[index]
49
- return example["message"], example["max_new_tokens"], example["temperature"]
50
-
51
 
52
  with gr.Blocks() as demo:
53
  gr.Markdown(title)
@@ -57,18 +48,18 @@ with gr.Blocks() as demo:
57
  message_box = gr.Textbox(lines=2, label="Your Message")
58
  generate_button = gr.Button("Try🫡Command-R")
59
  output_box = gr.Textbox(label="🫡Command-R")
 
 
 
 
 
 
 
 
60
 
61
  generate_button.click(
62
  fn=generate_response,
63
  inputs=[message_box, max_new_tokens_slider, temperature_slider],
64
  outputs=output_box
65
  )
66
- example_dropdown = gr.Dropdown(label="🫡Load Example", choices=example_choices)
67
- example_button = gr.Button("🫡Load")
68
- example_button.click(
69
- fn=load_example,
70
- inputs=example_dropdown,
71
- outputs=[message_box, max_new_tokens_slider, temperature_slider]
72
- )
73
-
74
- demo.launch()
 
6
 
# Markdown header shown at the top of the Gradio app.
# NOTE: this is the Command-R space — the model link must point at
# CohereForAI/c4ai-command-r-v01, not StarCoder.
title = """
# Welcome to 🌟Tonic's🫡Command-R
🫡Command-R is a Large Language Model optimized for conversational interaction and long context tasks. It targets the “scalable” category of models that balance high performance with strong accuracy, enabling companies to move beyond proof of concept, and into production. 🫡Command-R boasts high precision on retrieval augmented generation (RAG) and tool use tasks, low latency and high throughput, a long 128k context, and strong capabilities across 10 key languages. You can build with this endpoint using🫡Command-R available here : [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01). You can also use 🫡Command-R by cloning this space. Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/Command-R?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface:[MultiTransformer](https://huggingface.co/MultiTransformer) Math 🔍 [introspector](https://huggingface.co/introspector) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [Torchon](https://github.com/Tonic-AI/Torchon)🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
"""
12
 
 
def generate_response(user_input, max_new_tokens, temperature):
    """Generate a chat reply from the Command-R model.

    Args:
        user_input: The user's message (a single conversational turn).
        max_new_tokens: Maximum number of tokens to generate, excluding the prompt.
        temperature: Sampling temperature passed to ``model.generate``.

    Returns:
        The decoded model reply as a string, without the echoed prompt.
    """
    messages = [{"role": "user", "content": user_input}]
    # apply_chat_template(..., return_tensors="pt") returns a tensor of token
    # ids, NOT a dict — indexing it with ['input_ids'] would raise. Use the
    # tensor directly and move it to the model's device before generating.
    input_ids = tokenizer.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    )
    input_ids = input_ids.to(model.device)

    gen_tokens = model.generate(
        input_ids,
        # max_new_tokens counts only generated tokens, so no manual
        # prompt-length arithmetic (as with max_length) is needed.
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
    )

    # generate() returns prompt + continuation; slice off the prompt tokens
    # so only the model's reply is decoded and returned.
    gen_text = tokenizer.decode(
        gen_tokens[0][input_ids.shape[-1]:], skip_special_tokens=True
    )
    return gen_text
35
 
36
 
 
37
# Canned prompts surfaced in the UI. Each spec is (message, max_new_tokens,
# temperature), mirroring the inputs of generate_response.
_example_specs = [
    ("What is the weather like today?", 250, 0.5),
    ("Tell me a joke.", 650, 0.7),
    ("Explain the concept of machine learning.", 980, 0.4),
]
examples = [
    {"message": msg, "max_new_tokens": tokens, "temperature": temp}
    for msg, tokens, temp in _example_specs
]
 
 
 
 
 
 
 
42
 
43
  with gr.Blocks() as demo:
44
  gr.Markdown(title)
 
48
  message_box = gr.Textbox(lines=2, label="Your Message")
49
  generate_button = gr.Button("Try🫡Command-R")
50
  output_box = gr.Textbox(label="🫡Command-R")
51
+ with gr.Accordion("🫡Examples", open=True):
52
+ gr.Examples(
53
+ examples=examples,
54
+ inputs=[message_box, max_new_tokens_slider, temperature_slider],
55
+ outputs=output_box,
56
+ fn=generate_response,
57
+ cache_examples=True
58
+ )
59
 
60
  generate_button.click(
61
  fn=generate_response,
62
  inputs=[message_box, max_new_tokens_slider, temperature_slider],
63
  outputs=output_box
64
  )
65
+ demo.launch()