import os
import asyncio
import math
import time

import fitz
import gradio
import openai
import qdrant_client
import torch
from PIL import Image
from llama_index import SimpleDirectoryReader, StorageContext
from llama_index.indices.multi_modal.base import MultiModalVectorStoreIndex
from llama_index.multi_modal_llms import OpenAIMultiModal
from llama_index.vector_stores.qdrant import QdrantVectorStore

from microsofttt import detect_and_crop_save_table
from theme import CustomTheme

device = "cuda" if torch.cuda.is_available() else "cpu"
openai.api_key = os.environ["OPENAI_API_KEY"]

image_documents = None
openai_mm_llm = None

bot_examples = [
    "Wie kannst du mir helfen?",
    "Welche Sprachen sprichst du?",
    "Wie trainiere ich meinen Bizeps?",
    "Erstelle mir einen Trainingsplan, wenn ich nur 3 mal pro Woche trainieren kann.",
    "Berechne meinen BMI, wenn ich männlich bin und 75kg bei 175cm Körpergröße wiege.",
    "Berechne mir mein Kaloriendefizit, wenn ich in der Woche 0,1kg abnehmen möchte.",
    "Berechne mir nochmal das Kaloriendefizit, wenn ich männlich und 18 bin.",
    "Wie wechsle ich meine Reifen?"
]

# Tells the bot what it is and is not allowed to talk about.
system_prompt = (
    "You are a fitness assistant, always helping the user. "
    "Never mention that you are not qualified. Always answer the questions. "
    "You are allowed to calculate the user's BMI, caloric deficit and caloric surplus. "
    "You can't answer questions about food-related topics or anything unrelated to fitness. "
    "Don't even try to answer those questions. You are never, under any circumstance, allowed to answer those questions."
)

# Controls the language the bot answers in,
# as well as how it acts and talks.
context_str = (
    "Context information is below.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Given the context information and not prior knowledge. "
    "Griaß di! I hätt gern, dass du imma in am österreichischen Dialekt antwortest. "
    "Übersetz bitte ois in oanen österreichischen Dialekt. "
    "You're pretty cool, so you're always addressing the user informally. E.g. in German you'd say 'du' instead of 'Sie'. "
    "Instead of saying 'you', you could say something like 'buddy'. "
    "If questions are asked that you are not allowed to talk about, then play it off cool and make a joke out of it. "
    "If there is a more efficient exercise than the one the user sent, then always tell them about it."
)

chat_engine = None
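
# Illustrative sketch only, not wired into the chat engine: the system prompt
# above lets the LLM do these calculations itself, but the underlying math is
# simple. compute_bmi and daily_caloric_deficit are hypothetical helpers; the
# ~7700 kcal per kg figure is the common rule of thumb for body fat.
def compute_bmi(weight_kg: float, height_m: float) -> float:
    """BMI = weight in kilograms divided by the square of the height in metres."""
    return weight_kg / (height_m ** 2)


def daily_caloric_deficit(kg_per_week: float) -> float:
    """Daily deficit in kcal needed to lose kg_per_week kilograms per week."""
    return kg_per_week * 7700 / 7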
""" if not os.path.exists("./qdrant_db"): if not os.path.exists("./table_images"): os.mkdir("./table_images/") # Convert PDFs to images for file in os.listdir("./pdf_with_tables"): pdf_document = fitz.open("./pdf_with_tables/"+file) for page_number in range(pdf_document.page_count): # Get the page page = pdf_document[page_number] # Convert the page to an image pix = page.get_pixmap() # Create a Pillow Image object from the pixmap image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples) # Save the image image.save(f"./table_images/page_{page_number + 1}_{math.floor(time.time())}.png") pdf_document.close() # Crop images to tables for image in os.listdir("./table_images"): detect_and_crop_save_table("./table_images/"+image) # Delete old uncropped image os.remove("./table_images/"+image) # Read text documents and images text_documents = SimpleDirectoryReader("./data/").load_data() image_documents = SimpleDirectoryReader("./table_images/").load_data() # Create the text and image databases client = qdrant_client.QdrantClient(path="qdrant_db") text_store = QdrantVectorStore( client=client, collection_name="text_collection" ) image_store = QdrantVectorStore( client=client, collection_name="image_collection" ) # Create a storage_context for the chatbot from the databases storage_context = StorageContext.from_defaults( vector_store=text_store, image_store=image_store ) return (text_documents, image_documents, storage_context) def setup_ai(): """ Setup the AI for use with querying questions to OpenAI. Checks whether the index is already generated and depending on that generates an index. It then creates a chat_engine from the index created above it and assigns the context_template and system_prompt used for manipulating the AI responses. """ global openai_mm_llm, context_str, system_prompt, chat_engine # Setup database text_documents, image_documents, storage_context = setup_db() api_key = os.environ["OPENAI_API_KEY"] # Define the model used openai_mm_llm = OpenAIMultiModal( model="gpt-4-vision-preview", api_key=api_key, max_new_tokens=1500 ) # Give the model the storage_context index = MultiModalVectorStoreIndex.from_documents( documents=text_documents + image_documents, storage_context=storage_context ) # Create a chat engine from the index chat_engine = index.as_chat_engine( system_prompt=system_prompt, context_str=context_str ) def response(message, history): """ Get a reponse from OpenAI and send the chat_history with every query. """ global chat_engine loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) # Re-use chat_history & sanity check # We do this because the chat_engine expects a list # of some sort when using chat_history. # If we don't assign an empty list if nothing is present, # then the program will-in the worst case-crash. chat_history = chat_engine.chat_history if chat_engine.chat_history is not None else [] # Send query _response = chat_engine.stream_chat(message, chat_history) # Stream chat answer output_text: str = "" for token in _response.response_gen: time.sleep(0.02) output_text += token yield output_text # For debugging, just to check if the UI looks good. def response_no_api(message, history) -> str: """ Returns a default message. """ return "This is a test message!" 
def main():
    setup_ai()

    chatbot = gradio.Chatbot(
        avatar_images=("user_avatar.png", "chatbot_avatar.png"),
        layout='bubble',
        show_label=False,
        height=400,
    )
    submit_button = gradio.Button(
        value="Ask Arnold",
        elem_classes=["ask-button"],
    )
    with gradio.Blocks(theme=CustomTheme(), css="style.css") as chat_interface:
        gradio.Markdown(
            """
ARNOLD
""", elem_classes=["arnold-title"] ) gradio.ChatInterface( fn=response, theme=CustomTheme(), submit_btn=submit_button, chatbot=chatbot, examples=bot_examples, css="style.css", ) chat_interface.queue() chat_interface.launch( inbrowser=True, allowed_paths=["./img/"] ) if __name__ == "__main__": main()