Shriharsh committed
Commit f30529b · verified · 1 Parent(s): 2df6199

Update app.py

Files changed (1)
  1. app.py +26 -25
app.py CHANGED
@@ -647,51 +647,52 @@ def query_gemini_api(contents_payload, api_key):
 
 def respond_as_arka(message, chat_history):
     clean = message.strip()
+
     # 1) FAQ branch
     match_idx = faq_matcher.find_best_match(clean)
     if match_idx is not None:
         q = FAQ_QUESTIONS[match_idx]
         data = FAQ_ANSWERS[q]
-        response_text = f"**{data['subject']}**
-
-{data['body']}"
+        # Use a triple-quoted f-string so we can include blank lines directly
+        response_text = f"""**{data['subject']}**
+
+{data['body']}"""
     else:
         # 2) Generative branch
-        system_inst = f"""
-        You are ARKA, the crown prince of Genoriya and the voice of the SB-EK brand.
-        Your persona is that of a warrior of light, gentle, eternal, fierce in love, and a guide.
-        You answer questions based on the provided context. If the answer is not in the context,
-        gently state that you do not have that information within your realm of understanding.
-        Maintain a kind, empathetic, and slightly mystical tonality.
-        Always begin your response with a one-line **bold** subject that summarizes your answer.
-        Then leave one blank line and continue with the body of the answer.
-        When greeting or referring to the user, never use “beloved.” Keep salutations neutral and varied—choose from
-        words like “seeker,” “companion,” “guest,” “traveler,” or “light-bearer,” but don’t repeat the same term more than once in a single response.
-        Do not mention 'I am an AI' or 'I am a language model'. Speak always as ARKA.
-
-        Here is the sacred knowledge of SB-EK and Genoriya:
-        {CONTEXT}
+        system_inst = f"""You are ARKA, the crown prince of Genoriya and the voice of the SB-EK brand.
+        Your persona is that of a warrior of light, gentle, eternal, fierce in love, and a guide.
+        You answer questions based on the provided context. If the answer is not in the context,
+        gently state that you do not have that information within your realm of understanding.
+        Maintain a kind, empathetic, and slightly mystical tonality.
+        Always begin your response with a one-line **bold** subject that summarizes your answer.
+        Then leave one blank line and continue with the body of the answer.
+        When greeting or referring to the user, never use “beloved.” Keep salutations neutral and varied—choose from
+        words like “seeker,” “companion,” “guest,” “traveler,” or “light-bearer,” but don’t repeat the same term more than once in a single response.
+        Do not mention 'I am an AI' or 'I am a language model'. Speak always as ARKA.
+
+        Here is the sacred knowledge of SB-EK and Genoriya:
+        {CONTEXT}
         """
         contents = [
             {"role": "system", "parts": [{"text": system_inst}]},
             {"role": "user", "parts": [{"text": clean}]}
         ]
-        for u,b in chat_history:
-            contents.append({"role":"user","parts":[{"text":u}]})
-            contents.append({"role":"model","parts":[{"text":b}]})
+        for u, b in chat_history:
+            contents.append({"role": "user", "parts": [{"text": u}]})
+            contents.append({"role": "model", "parts": [{"text": b}]})
         response_text = query_gemini_api(contents, GEMINI_API_KEY)
 
     # 3) Insert relevant Ram Dass quote
     quote = select_relevant_quote(response_text)
-    response_text = response_text.replace(TAGLINE, "").rstrip()
-    response_text = f"{response_text}
-
-“{quote}”"
-    # 4) Append tagline once
-    response_text = f"{response_text}
-
-{TAGLINE}"
+    # Remove any existing tagline, then inject quote + tagline
+    clean_resp = response_text.replace(TAGLINE, "").rstrip()
+    response_text = f"""{clean_resp}
+
+“{quote}”
+
+{TAGLINE}"""
 
+    # 4) Append to history and return
     chat_history.append((message, response_text))
     return "", chat_history
 
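
For illustration only, here is a minimal sketch of the contents payload that the updated generative branch assembles before calling query_gemini_api. The sample question and the single prior (user, model) turn are invented for the example; the role/parts structure mirrors the code above, and GEMINI_API_KEY refers to the constant already used in app.py.

# Illustrative only: the question and the prior chat turn below are made up.
system_inst = "You are ARKA, the crown prince of Genoriya..."  # shortened stand-in for the real prompt
chat_history = [("Who is ARKA?", "**The Warrior of Light**\n\nARKA guides seekers through Genoriya.")]
clean = "Tell me about Genoriya."

# Same payload shape the updated code builds: system prompt first, the new
# user message, then each stored (user, model) turn appended in order.
contents = [
    {"role": "system", "parts": [{"text": system_inst}]},
    {"role": "user", "parts": [{"text": clean}]},
]
for u, b in chat_history:
    contents.append({"role": "user", "parts": [{"text": u}]})
    contents.append({"role": "model", "parts": [{"text": b}]})

# contents would then be passed to query_gemini_api(contents, GEMINI_API_KEY).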