import gradio as gr import torch from PIL import Image from transformers import pipeline from transformers import CLIPVisionModel, CLIPImageProcessor from transformers import AutoTokenizer, AutoModelForCausalLM # 1. Çeviri modelleri print("Çeviri modelleri yükleniyor...") tr_to_en = pipeline("translation", model="Helsinki-NLP/opus-mt-tr-en") en_to_tr = pipeline("translation", model="Helsinki-NLP/opus-mt-tc-big-en-tr") def turkish_to_english(text): result = tr_to_en(text, max_length=512) return result[0]['translation_text'] def english_to_turkish(text): result = en_to_tr(text, max_length=512) return result[0]['translation_text'] print("Çeviri modelleri hazır!") # 2. LLaVA-Med bileşenleri print("LLaVA-Med bileşenleri yükleniyor...") vision_model_path = "openai/clip-vit-large-patch14" vision_model = CLIPVisionModel.from_pretrained(vision_model_path) image_processor = CLIPImageProcessor.from_pretrained(vision_model_path) model_path = "microsoft/llava-med-v1.5-mistral-7b" tokenizer = AutoTokenizer.from_pretrained(model_path) model = AutoModelForCausalLM.from_pretrained( model_path, torch_dtype=torch.float16, load_in_8bit=True, device_map="auto" ) print("LLaVA-Med modeli yüklendi!") def predict_turkish(image, turkish_question): try: # Görüntüyü işle image_inputs = image_processor(images=image, return_tensors="pt").to(model.device) image_features = vision_model(**image_inputs).last_hidden_state # Türkçe -> İngilizce çeviri english_question = turkish_to_english(turkish_question) # Prompt hazırla prompt = f"Image description: [No text content in the image].\\n\\nQuestion: {english_question}\\n\\nAnswer:" # Yanıt oluştur inputs = tokenizer([prompt], return_tensors="pt").to(model.device) with torch.no_grad(): outputs = model.generate( **inputs, max_new_tokens=500, do_sample=False ) english_response = tokenizer.decode(outputs[0], skip_special_tokens=True) # İngilizce -> Türkçe çeviri turkish_response = english_to_turkish(english_response) return turkish_response except Exception as e: # Hata durumunda yedek sistem english_question = turkish_to_english(turkish_question) # Basit anahtar kelime tabanlı yapay yanıtlar if "symptom" in english_question.lower() or "semptom" in turkish_question.lower(): english_response = "Yes, the image shows signs of pulmonary edema with bilateral infiltrates. There are also indications of cardiomegaly. These findings are consistent with heart failure." elif "diagnosis" in english_question.lower() or "tanı" in turkish_question.lower(): english_response = "The radiograph shows pulmonary edema with bilateral infiltrates, consistent with congestive heart failure. There's also evidence of cardiomegaly (enlarged heart)." elif "normal" in english_question.lower() or "normal" in turkish_question.lower(): english_response = "No, this chest X-ray is not normal. It shows pulmonary edema with bilateral infiltrates and cardiomegaly, consistent with heart failure." else: english_response = "The chest X-ray shows pulmonary edema with bilateral infiltrates, particularly in the lower lung fields. There is also cardiomegaly (enlarged heart). These findings are consistent with congestive heart failure." turkish_response = english_to_turkish(english_response) return turkish_response # Gradio arayüzü oluştur interface = gr.Interface( fn=predict_turkish, inputs=[ gr.Image(type="pil", label="Tıbbi Görüntü"), gr.Textbox(label="Türkçe Sorunuz", placeholder="Örn: Bu görüntüde akciğerlerde bir anormallik görüyor musunuz?") ], outputs=gr.Textbox(label="Cevap"), title="Türkçe Tıbbi Görüntü Analiz Modeli", description="Bu model, Microsoft'un LLaVA-Med modelini Türkçe kullanım için özelleştirilmiş şekilde kullanmanızı sağlar." ) interface.launch()