import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Atomic-Ai/AtomicGPT_2")
model = AutoModelForCausalLM.from_pretrained("Atomic-Ai/AtomicGPT_2")
def generate_text(prompt):
    # Tokenize the prompt and build a matching attention mask
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    attention_mask = torch.ones(input_ids.shape, dtype=torch.long)
    # Cap the output at the model's context window; otherwise allow about 90 new tokens beyond the prompt
    max_length = model.config.n_positions if len(input_ids[0]) > model.config.n_positions else len(input_ids[0]) + 90
    beam_output = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_length=max_length,
        min_length=1,
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
        temperature=0.7,
        top_p=0.9,
        top_k=10,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.eos_token_id
    )
    # Decode the best sequence and drop special tokens
    text = tokenizer.decode(beam_output[0], skip_special_tokens=True)
    return text
| css = """ | |
| h1 { | |
| text-align: center; | |
| } | |
| #duplicate-button { | |
| margin: auto; | |
| color: white; | |
| background: #1565c0; | |
| border-radius: 100vh; | |
| } | |
| .contain { | |
| max-width: 900px; | |
| margin: auto; | |
| padding-top: 1.5rem; | |
| } | |
| """ | |
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Type a message...", label="Your Message"),
    outputs=gr.Textbox(label="Löwolf Chat Responses", placeholder="Responses will appear here...", interactive=False, lines=10),
    css=css
)

iface.launch()
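# Optional: a quick local smoke test (a sketch, assuming the "Atomic-Ai/AtomicGPT_2"
# weights download successfully) can call generate_text directly before relying on
# the web UI, e.g.:
#     print(generate_text("Hallo, wie geht es dir?"))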