Spaces:
				
			
			
	
			
			
		Runtime error
		
	
	
	
			
			
	
	
	
	
		
		
		Runtime error
		
	Update app.py
Browse files
    	
        app.py
    CHANGED
    
    | @@ -1,24 +1,18 @@ | |
| 1 | 
            -
            from  | 
| 2 | 
            -
            from mistral_inference.generate import generate
         | 
| 3 |  | 
| 4 | 
            -
             | 
| 5 | 
            -
             | 
| 6 | 
            -
            from mistral_common.protocol.instruct.request import ChatCompletionRequest
         | 
| 7 |  | 
| 8 | 
            -
             | 
| 9 | 
            -
             | 
|  | |
|  | |
|  | |
|  | |
| 10 |  | 
| 11 | 
            -
             | 
| 12 | 
            -
             | 
| 13 |  | 
| 14 | 
            -
             | 
| 15 | 
            -
             | 
| 16 | 
            -
             | 
| 17 | 
            -
             | 
| 18 | 
            -
            images = encoded.images
         | 
| 19 | 
            -
            tokens = encoded.tokens
         | 
| 20 | 
            -
             | 
| 21 | 
            -
            out_tokens, _ = generate([tokens], model, images=[images], max_tokens=256, temperature=0.35, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
         | 
| 22 | 
            -
            result = tokenizer.decode(out_tokens[0])
         | 
| 23 | 
            -
             | 
| 24 | 
            -
            print(result)
         | 
|  | |
"""Minimal text-generation demo: load DeepSeek-R1 with Transformers and run one prompt."""

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the Hugging Face model.
# NOTE(review): deepseek-ai/DeepSeek-R1 is an extremely large checkpoint;
# this will only load on hardware with sufficient memory — confirm the
# target Space/host can hold it.
model_id = "deepseek-ai/DeepSeek-R1"

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,   # repo ships custom modeling code
    torch_dtype="auto",       # let the checkpoint pick its dtype (no FP8)
    low_cpu_mem_usage=True,   # stream weights to reduce peak RAM during load
)

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Test run.
# Bug fix: the original used max_length=100, which counts the PROMPT tokens
# toward the limit (and is deprecated for generation); max_new_tokens bounds
# only the generated continuation.
output = pipe("Hello, who are you?", max_new_tokens=100)
print(output)
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
