Nac31 committed on
Commit 3257b28 · 1 Parent(s): d6c48b9

Add app.py

Files changed (3):
  1. README.md +4 -3
  2. app.py +53 -0
  3. requirements.txt +14 -0
README.md CHANGED
@@ -1,12 +1,13 @@
 ---
 title: Sacha 1
-emoji: 📊
-colorFrom: purple
-colorTo: gray
+emoji: 🌍
+colorFrom: pink
+colorTo: purple
 sdk: gradio
 sdk_version: 5.20.0
 app_file: app.py
 pinned: false
+short_description: Sacha du BourgPalette
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,53 @@
+import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+import torch
+import os
+from dotenv import load_dotenv
+from huggingface_hub import login
+
+load_dotenv()
+
+# Log in to Hugging Face
+hf_token = os.getenv('HF_TOKEN')
+login(hf_token)
+
+# Model configuration
+model_path = "mistralai/Pixtral-Large-Instruct-2411"
+dtype = torch.bfloat16 if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8 else torch.float16
+
+# Load the tokenizer and model
+tokenizer = AutoTokenizer.from_pretrained(model_path)
+model = AutoModelForCausalLM.from_pretrained(
+    model_path,
+    device_map="auto",
+    torch_dtype=dtype
+)
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+def generate_response(message, temperature=0.7, max_new_tokens=500):
+    try:
+        response = pipe(
+            message,
+            temperature=temperature,
+            max_new_tokens=max_new_tokens,
+            do_sample=True
+        )
+        return response[0]['generated_text']
+    except Exception as e:
+        return f"Une erreur s'est produite : {str(e)}"
+
+# Gradio interface
+demo = gr.Interface(
+    fn=generate_response,
+    inputs=[
+        gr.Textbox(label="Votre message", placeholder="Entrez votre message ici..."),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Température"),
+        gr.Slider(minimum=10, maximum=2000, value=500, step=10, label="Nombre de tokens")
+    ],
+    outputs=gr.Textbox(label="Réponse"),
+    title="Chat avec Sacha-Mistral",
+    description="Un assistant conversationnel en français basé sur le modèle Sacha-Mistral"
+)
+
+if __name__ == "__main__":
+    demo.launch(share=True)
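
Once the Space is up, the function wired into gr.Interface is also reachable over the network. Below is a minimal sketch of calling it with gradio_client; the Space id Nac31/Sacha-1 is an assumption inferred from the README title, not something this commit confirms.

# Hedged sketch: query the running Space from Python via gradio_client.
# "Nac31/Sacha-1" is an assumed Space id; replace it with the real one.
from gradio_client import Client

client = Client("Nac31/Sacha-1")
result = client.predict(
    "Bonjour, présente-toi !",  # message
    0.7,                        # temperature (Température slider)
    200,                        # max_new_tokens (token-count slider)
    api_name="/predict",        # default endpoint name for a single gr.Interface
)
print(result)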
requirements.txt ADDED
@@ -0,0 +1,14 @@
+transformers
+torch
+accelerate
+datasets
+sentencepiece
+tokenizers
+gradio
+bitsandbytes
+openai
+langchain
+python-dotenv
+langchain-community
+huggingface_hub
+peft
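
requirements.txt pulls in bitsandbytes and peft even though app.py does not use them yet. If the intent is to fit the model into less GPU memory, the usual next step is a 4-bit load; the sketch below shows that under the assumption of a CUDA GPU, and is not part of this commit.

# Hedged sketch: 4-bit quantized loading via bitsandbytes (already in
# requirements.txt); app.py loads the model in bf16/fp16 instead.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Pixtral-Large-Instruct-2411",
    device_map="auto",
    quantization_config=bnb_config,
)

peft, also installed, would only matter for LoRA-style fine-tuning; nothing in this commit uses it.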