---
license: mit
language:
- en
metrics:
- f1
- accuracy
base_model:
- microsoft/Phi-3-mini-128k-instruct
tags:
- food
- LoRA
---
These are the LoRA adapters used by KERL, a food recommendation system. For usage details, please see our GitHub repository.

GitHub: https://github.com/mohbattharani/KERL

The snippet below loads the base Phi-3 model, attaches the four adapters, and builds a text-generation pipeline:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_path = "microsoft/Phi-3-mini-128k-instruct"
adapter = "checkpoints/"
kwargs = {"device_map": "cuda:0"}

system_message_default = {
    "role": "system",
    "content": (
        "You are a knowledgeable language assistant with a deep understanding of food recipes. "
        "Leveraging the provided context, your role is to assist the user with a variety of tasks "
        "using natural language. Your response should contain only names of recommended recipes "
        "from the context. If you don't know the answer, just return an empty string."
    ),
}

model = AutoModelForCausalLM.from_pretrained(
    model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs
)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

# Load the four task-specific LoRA adapters (requires `peft` to be installed).
model.load_adapter(adapter + 'keywords', adapter_name='keywords')
model.load_adapter(adapter + 'rec', adapter_name='reco')
model.load_adapter(adapter + 'nutri', adapter_name='nutri')
model.load_adapter(adapter + 'instruct', adapter_name='instruct')

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
```
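The walkthrough below passes a `generation_args` dict to the pipeline, which the card does not define. A minimal sketch, following the example settings in the base Phi-3 model card (adjust as needed):

```python
# Assumed generation settings (not defined in the original card);
# the values mirror the base Phi-3 model card's example.
generation_args = {
    "max_new_tokens": 500,
    "return_full_text": False,  # return only the assistant's reply
    "temperature": 0.0,
    "do_sample": False,
}
```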
An end-to-end example chaining the four adapters:

```python
q = ('Could you suggest some healthy recipes that use mayonnaise, bananas, and dry white wine, '
     'but do not contain unsweetened chocolate and steamed rice? The recipes should have no more '
     'than 0.24g of salt per 100g, fiber between 3.14g and 5.82g, and less than 2.49g of saturated fat.')

# Step 1: extract keywords (tags and ingredients) from the question.
model.set_adapter('keywords')
prompt = "Extract the tags and ingredients mentioned in the question provided."
q1 = f"Question: {q}. {prompt}"
messages = [system_message_default, {"role": "user", "content": q1}]
keywords = pipe(messages, **generation_args)[0]['generated_text']

# Step 2: get recipe recommendations, grounded in KG context
# (e.g., sub-graphs retrieved using the extracted keywords).
model.set_adapter('reco')
context = "Provide context here as sub-graphs from the KG"
q2 = f"Question: {q}. Context: {context}. Answer only with names of recipes relevant to the query."
messages = [system_message_default, {"role": "user", "content": q2}]
recipes_recommendation = pipe(messages, **generation_args)[0]['generated_text']

# Step 3: generate cooking steps for the first recommended recipe
# (assumes the model lists one recipe name per line).
dish_name = recipes_recommendation.split('\n')[0].strip()
model.set_adapter('instruct')
q3 = f"Generate the recipe for {dish_name}."
messages = [system_message_default, {"role": "user", "content": q3}]
recipe_steps = pipe(messages, **generation_args)[0]['generated_text']

# Step 4: generate nutrition information for the recipe.
model.set_adapter('nutri')
q4 = f"Generate the nutrition information for the dish named {dish_name}."
messages = [system_message_default, {"role": "user", "content": q4}]
recipe_nutritions = pipe(messages, **generation_args)[0]['generated_text']

final_answer = {
    'recommendation': recipes_recommendation,
    'selected_recipe': dish_name,
    'cooking_steps': recipe_steps,
    'nutritional_value': recipe_nutritions,
}
```
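The card leaves `context` as a placeholder; how sub-graphs are actually retrieved and serialized is described in the KERL GitHub repository. Purely as an illustration, a hypothetical helper that flattens (head, relation, tail) triples into a context string might look like this (the triples and format are invented for the example, not KERL's actual serialization):

```python
# Hypothetical helper: flatten KG triples into a plain-text context string.
# The triple format and wording are illustrative assumptions.
def triples_to_context(triples):
    return " ".join(f"{head} {relation} {tail}." for head, relation, tail in triples)

context = triples_to_context([
    ("Banana Bread", "contains", "bananas"),
    ("Banana Bread", "saturated_fat_per_100g", "1.8g"),
])
```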