ayeshaishaq004 committed on
Commit dcdccdb · verified · 1 Parent(s): 8720d09

Upload 2 files

Files changed (2)
  1. app.py +32 -49
  2. requirements.txt +3 -1
app.py CHANGED
@@ -1,64 +1,47 @@
- import gradio as gr
- from huggingface_hub import InferenceClient

  """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]

-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})

-     messages.append({"role": "user", "content": message})

-     response = ""

-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content

-         response += token
-         yield response


  """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
  )


- if __name__ == "__main__":
-     demo.launch()
+ # -*- coding: utf-8 -*-
+ """app.ipynb

+ Automatically generated by Colab.
+
+ Original file is located at
+     https://colab.research.google.com/drive/1qNBkOEPBOkXJ0zcGdwQmdS7bt5zxjpIr
+
+ ##Creating app.py
+
+ ###Installing Dependencies
  """

+ !pip install gradio transformers torch

+ """###Importing Dependencies"""

+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer

+ """###Loading the model and tokenizer"""

+ model_name = "gpt2"
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)

+ """###Defining the prediction function"""

+ def generate_text(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt")
+     outputs = model.generate(**inputs, max_length=100)
+     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return generated_text

+ """###Creating the Gradio interface

  """
+
+ api = gr.Interface(
+     fn=generate_text,
+     inputs=gr.Textbox(label="Input Prompt"),
+     outputs=gr.Textbox(label="Generated Text"),
  )

+ """###Launching the API"""

+ api.launch()
requirements.txt CHANGED
@@ -1 +1,3 @@
- huggingface_hub==0.25.2
+ gradio
+ transformers
+ torch
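
Note on the new app.py: it is a direct Colab export, so the `!pip install gradio transformers torch` line is IPython shell magic rather than Python and would fail if the file were executed as a plain script; the dependencies are already covered by the updated requirements.txt. A minimal sketch of the same app with the notebook-only lines dropped is shown below. It assumes nothing beyond what the commit itself uses (the same gpt2 checkpoint, the same max_length=100 generation setting, and the same Gradio Interface fields); it is illustrative, not the committed file.

# Sketch only: the committed app.py minus the Colab/IPython notebook lines.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the same GPT-2 checkpoint the commit uses.
model_name = "gpt2"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def generate_text(prompt):
    # Tokenize the prompt and generate up to 100 tokens in total, as in the commit.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

api = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Input Prompt"),
    outputs=gr.Textbox(label="Generated Text"),
)

api.launch()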