Anasuya Basu committed on
Commit
7ca0460
·
1 Parent(s): 4205746

Adding the Living Playbook as a part of prompt, setting system prompt and calling gpt-4o mini

Browse files
Files changed (5) hide show
  1. .gitattributes +1 -0
  2. app.py +45 -19
  3. data/Living-Playbook.pdf +3 -0
  4. pyproject.toml +13 -0
  5. uv.lock +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.pdf filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -1,6 +1,14 @@
 
 
 
 
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
 
 
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
@@ -39,26 +47,44 @@ def respond(
39
  response += token
40
  yield response
41
 
 
42
 
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
 
63
  if __name__ == "__main__":
64
- demo.launch()
 
 
1
+ from dotenv import load_dotenv
2
+ from openai import OpenAI
3
+ import json
4
+ import os
5
+ import requests
6
+ from pypdf import PdfReader
7
  import gradio as gr
8
  from huggingface_hub import InferenceClient
9
 
10
+ load_dotenv(override=True)
11
+
12
  """
13
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
14
  """
 
47
  response += token
48
  yield response
49
 
50
+ class Harold:
51
 
52
+ def __init__(self):
53
+ self.openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
54
+ self.name = "Harold"
55
+ reader = PdfReader("data/Living-Playbook.pdf")
56
+ self.text = ""
57
+ for page in reader.pages:
58
+ text = page.extract_text()
59
+ if text:
60
+ self.text += text
61
+
62
+ def system_prompt(self):
63
+ system_prompt = f"""
64
+ You are acting as {self.name}, a helpful assistant.
65
+ You are answering questions and having discussions about the contents of the book "Living Playbook".
66
+ Be professional and engaging, but also friendly and approachable.
67
+ You are given a context of a book and a question and the conversation history.
68
+ You need to answer the question based on the context and the conversation history.
69
+ You should be consise and to the point. If you don't know the answer, say so.
70
+ You might be asked to explain a concept or idea in the book and describe a purpose of a game. You should be able to do this.
71
+ """
72
+ system_prompt += f"""
73
+ Here is the context of the book:
74
+ {self.text}
75
+ """
76
+ return system_prompt
77
+
78
+ def chat(self, message, history):
79
+ messages = [{"role:": "system", "content": self.system_prompt()}] + history + [{"role:": "user", "content": message}]
80
+
81
+ response = self.openai_client.chat.completions.create(
82
+ model="gpt-4o",
83
+ messages=messages,
84
+ )
85
+ return response.choices[0].message.content
86
 
87
 
88
  if __name__ == "__main__":
89
+ harold = Harold()
90
+ gr.ChatInterface(harold.chat, type="messages").launch()
data/Living-Playbook.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:613244d04870d4a3970e99128e995bb5645c909f9362b05e09670f44b7ad5571
3
+ size 824073
pyproject.toml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "improv_chatbot"
3
+ version = "0.1.0"
4
+ description = "Living playbook chatbot"
5
+ readme = "README.md"
6
+ requires-python = ">=3.12"
7
+ dependencies = [
8
+ "python-dotenv>=1.0.0",
9
+ "gradio>=5.36.2",
10
+ "huggingface-hub>=0.33.4",
11
+ "openai>=1.95.1",
12
+ "pypdf>=4.0.0",
13
+ ]
uv.lock ADDED
The diff for this file is too large to render. See raw diff