Ismail-Alif committed on
Commit
3b2f6bf
·
1 Parent(s): d02426e

this is final

Browse files
Files changed (2) hide show
  1. app.py +25 -34
  2. requirements.txt +2 -2
app.py CHANGED
@@ -1,41 +1,32 @@
1
- from transformers import AutoModelForCausalLM, AutoTokenizer
2
- import torch
3
  import gradio as gr
 
 
 
 
 
 
 
 
4
 
5
- # Load the model and tokenizer
6
- tokenizer = AutoTokenizer.from_pretrained("sagorsarker/bangla-gpt")
7
- model = AutoModelForCausalLM.from_pretrained("sagorsarker/bangla-gpt")
 
 
8
 
9
- # Response generation function
10
- def bangla_chatbot(message, history):
11
- history_text = " ".join([msg["content"] for msg in history])
12
- prompt = f"ইউজার: {message}\nচ্যাটবট:"
13
- inputs = tokenizer.encode(prompt, return_tensors="pt")
14
 
15
- outputs = model.generate(
16
- inputs,
17
- max_length=100,
18
- do_sample=True,
19
- top_k=50,
20
- top_p=0.95,
21
- temperature=0.7,
22
- pad_token_id=tokenizer.eos_token_id
23
- )
24
-
25
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
26
- # Get only the generated part
27
- reply = response.replace(prompt, "").strip()
28
-
29
- history.append({"role": "user", "content": message})
30
- history.append({"role": "assistant", "content": reply})
31
- return history
32
 
33
- demo = gr.ChatInterface(
34
- fn=bangla_chatbot,
35
- title="📄 বাংলা CV সহায়তা বট",
36
- chatbot=gr.Chatbot(type="messages"),
37
- description="বাংলায় আপনার CV সম্পর্কিত প্রশ্ন করুন, আমরা উত্তর দেবো। (বিনামূল্যে মডেল ব্যবহার করা হয়েছে)",
38
- theme="soft"
39
- )
40
 
41
- demo.launch()
 
 
 
 
 
1
  import gradio as gr
2
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
3
+ import torch
4
+
5
# Load the pre-trained GPT-2 checkpoint and its matching tokenizer.
# ('distilgpt2' is a smaller, faster drop-in alternative.)
model_name = "gpt2"
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
model.eval()  # inference only — disables dropout
10
 
11
# Chat function
def chat_with_gpt2(message, history=None):
    """Generate a GPT-2 reply to *message*, conditioned on the chat history.

    Args:
        message: The user's latest utterance.
        history: List of (user_message, bot_response) pairs from earlier
            turns. Defaults to a fresh empty list each call.

    Returns:
        Tuple of (response text, updated history list).

    NOTE(review): gr.ChatInterface typically expects fn(message, history)
    to return just the reply string; returning (response, history) may need
    adjusting — verify against the installed Gradio version.
    """
    # BUG FIX: the original signature used `history=[]`, a mutable default
    # shared across all calls, so turns from one conversation leaked into
    # every later conversation that relied on the default.
    if history is None:
        history = []

    # Flatten prior turns plus the new message into a single prompt string.
    input_text = " ".join([msg for pair in history for msg in pair]) + " " + message
    inputs = tokenizer.encode(input_text, return_tensors="pt")

    # Sampled decoding; GPT-2 has no pad token, so pad with EOS.
    outputs = model.generate(
        inputs,
        max_length=150,  # total length, prompt tokens included
        pad_token_id=tokenizer.eos_token_id,
        no_repeat_ngram_size=2,
        do_sample=True,
        top_p=0.9,
        temperature=0.8,
    )

    # BUG FIX: decode only the newly generated tokens. The original decoded
    # the full sequence and sliced the string by len(input_text), which is
    # fragile: GPT-2 tokenization does not round-trip whitespace exactly,
    # so the cut could land mid-word or leave prompt fragments in the reply.
    generated_ids = outputs[0][inputs.shape[-1]:]
    response = tokenizer.decode(generated_ids, skip_special_tokens=True).strip()

    history.append((message, response))
    return response, history
 
 
 
24
 
25
# Wire the generation function into a Gradio chat UI.
chat_interface = gr.ChatInterface(
    fn=chat_with_gpt2,
    title="GPT-2 Resume Chatbot",
    description="A chatbot powered by GPT-2 for answering resume/CV-related queries.",
    theme="default",
)
 
 
 
29
 
30
# Start the web app only when executed as a script, not on import.
if __name__ == "__main__":
    chat_interface.launch()
requirements.txt CHANGED
@@ -1,3 +1,3 @@
1
- transformers
2
  torch
3
- gradio
 
 
 
1
  torch
2
+ transformers
3
+ gradio