# --- Hugging Face Spaces page residue (not Python code); preserved as comments ---
# Spaces:
# Sleeping
# Sleeping
# File size: 1,728 Bytes
# 6a737a4 f016ab0 992ac6b f016ab0 992ac6b f016ab0 992ac6b 6a737a4 992ac6b 6a737a4 f016ab0 6a737a4 992ac6b 6a737a4 495346b
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37
import gradio as gr
# import requests
from transformers import pipeline
# Load the text-generation pipeline once at import time: a TinyLlama-1.1B
# chat model fine-tuned for SEO-optimised product-title suggestion.
# NOTE: this downloads/loads the model weights at startup (slow, memory-heavy).
# (Dead commented-out Inference-API code removed; the app now runs the
# model locally via the pipeline.)
pipe = pipeline("text-generation", model="shivanikerai/TinyLlama-1.1B-Chat-v1.0-seo-optimised-title-suggestion-v1.0")
def my_function(keywords, product_info, generator=None):
    """Generate SEO-optimised e-commerce product titles.

    Builds a Llama-2-style chat prompt from the user's keywords and product
    details, runs it through the text-generation pipeline, and returns the
    text the model emits inside the "[Suggested Titles]" section.

    Args:
        keywords: Keywords to optimise the title for (whitespace-stripped).
        product_info: Free-text product details to ground the suggestions.
        generator: Optional callable with the pipeline's interface
            (prompt -> [{"generated_text": ...}]). Defaults to the
            module-level ``pipe``; injectable for testing.

    Returns:
        The suggested-titles text, with the closing "[/Suggested Titles]"
        marker (if the model emits one) and surrounding whitespace removed.
    """
    # Llama-2 special tokens plus the fine-tune's custom section markers.
    B_SYS, E_SYS = "<<SYS>>", "<</SYS>>"
    B_INST, E_INST = "[INST]", "[/INST]"
    B_in, E_in = "[Product Details]", "[/Product Details]"
    B_out, E_out = "[Suggested Titles]", "[/Suggested Titles]"
    prompt = f"""{B_INST} {B_SYS} You are a helpful, respectful and honest assistant for ecommerce product title creation. {E_SYS}
Create a SEO optimized e-commerce product title for the keywords:{keywords.strip()}
{B_in}{product_info}{E_in}\n{E_INST}\n\n{B_out}"""
    run = generator if generator is not None else pipe
    predictions = run(prompt)
    # The pipeline echoes the prompt, so keep only what follows the last
    # "[Suggested Titles]" marker; also drop the closing marker the model
    # typically appends after the titles (the original kept it in the output).
    generated = predictions[0]["generated_text"]
    return generated.split(B_out)[-1].split(E_out)[0].strip()
# Create the Gradio interface
interface = gr.Interface(fn=my_function,
inputs=["text", "text"],
# inputs=["text", "text", "text"],
outputs="text",
title="SEO Optimised Title Suggestion",
description="Enter Keywords and Product Info:")
interface.launch() |