import streamlit as st
import torch
from transformers import YolosImageProcessor, YolosForObjectDetection, pipeline
from PIL import Image
import requests

st.title("Welcome to the 🇷🇺 Translator App! 🪆")

# Helsinki-NLP/opus-mt-ru-en translates Russian text into English.
user_text = st.text_area("Your Russian input here! 🇷🇺")

pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-ru-en")

# Only run the model once the user has entered some text.
if user_text:
    st.write(pipe(user_text)[0]["translation_text"])

# Experimental: YOLOS object detection on an image fetched from a URL.
# url = st.text_area("Put your URL here")
# if url:
#     image = Image.open(requests.get(url, stream=True).raw)
#     st.image(image)
#
#     model = YolosForObjectDetection.from_pretrained("hustvl/yolos-tiny")
#     image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
#
#     inputs = image_processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#
#     # The model predicts bounding boxes and corresponding COCO classes.
#     # Note: outputs.pred_boxes is a tensor of normalized box coordinates,
#     # not an image, so it cannot be passed to st.image directly.
#     logits = outputs.logits
#     bboxes = outputs.pred_boxes
#
#     # Convert raw outputs to labelled boxes in pixel coordinates and print them.
#     target_sizes = torch.tensor([image.size[::-1]])
#     results = image_processor.post_process_object_detection(
#         outputs, threshold=0.9, target_sizes=target_sizes
#     )[0]
#     for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
#         box = [round(i, 2) for i in box.tolist()]
#         st.write(
#             f"Detected {model.config.id2label[label.item()]} with confidence "
#             f"{round(score.item(), 3)} at location {box}"
#         )
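
# A minimal sketch (not part of the original app) of caching the translation
# pipeline with st.cache_resource, so the Helsinki-NLP model is loaded once per
# process instead of on every Streamlit rerun. The helper name load_translator
# is hypothetical; to use it, uncomment and place it above the pipeline call.
#
# @st.cache_resource
# def load_translator():
#     return pipeline("translation", model="Helsinki-NLP/opus-mt-ru-en")
#
# pipe = load_translator()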
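
# A hedged sketch of how the detections could be visualised instead of passing
# the raw pred_boxes tensor to st.image. It assumes the `image` and `results`
# variables from the commented YOLOS block above and uses PIL's ImageDraw,
# which this script does not otherwise import.
#
# from PIL import ImageDraw
#
# draw = ImageDraw.Draw(image)
# for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
#     # results["boxes"] are (xmin, ymin, xmax, ymax) in pixel coordinates.
#     draw.rectangle(box.tolist(), outline="red", width=3)
# st.image(image)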