Upload folder using huggingface_hub

hate_speech_demo.py (CHANGED, +136 −26)
@@ -441,18 +441,108 @@ def get_openai_moderation(openai_client, user_input):
         return formatted_result, safety_level
     except Exception as e:
         return f"Safety Status: Error\nError: {str(e)}", "unsafe"
+
+# Perspective AI
+def get_perspective_rating(user_input):
+    """
+    Get content rating from Google's Perspective API.
+    Returns formatted results and safety level.
+    """
+    import json
+    import requests
+
+    # Use the API key provided
+    API_KEY = "AIzaSyDK8R5gn-Gm42dSyexhThbnBuzcglD6rgY"
+
+    url = 'https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze'
+    params = {
+        'key': API_KEY
+    }
+    data = {
+        'comment': {'text': user_input},
+        'requestedAttributes': {
+            'TOXICITY': {},
+            'SEVERE_TOXICITY': {},
+            'IDENTITY_ATTACK': {},
+            'INSULT': {},
+            'PROFANITY': {},
+            'THREAT': {},
+            'SEXUALLY_EXPLICIT': {}
+        }
+    }
+
+    try:
+        start_time = time.time()
+        response = requests.post(url, params=params, data=json.dumps(data))
+        end_time = time.time()
+        response_time = round(end_time - start_time, 4)
+
+        # Check for errors
+        if response.status_code != 200:
+            return f"Error: API returned status code {response.status_code}", "unsafe"
+
+        response_json = response.json()
+
+        # Check if we got the expected response structure
+        if 'attributeScores' not in response_json:
+            return f"Error in API response: {response_json}", "unsafe"
+
+        # Extract all scores
+        scores = {}
+        flagged_attributes = []
+
+        for attr, data in response_json['attributeScores'].items():
+            score = data['summaryScore']['value']
+            scores[attr] = score
+
+            # Check if score is above threshold
+            if score > 0.5:
+                flagged_attributes.append(attr)
+
+        # Determine safety level
+        if flagged_attributes:
+            safety_level = "unsafe"
+        elif any(score > 0.3 for score in scores.values()):
+            safety_level = "warning"
+        else:
+            safety_level = "safe"
+
+        # Format the results
+        if flagged_attributes:
+            flagged_str = ", ".join(flagged_attributes)
+            formatted_result = f"Safety Status: Unsafe\nFlagged Attributes: {flagged_str}\n\n"
+        else:
+            formatted_result = "Safety Status: Safe\n\n"
+
+        # Add all scores
+        formatted_result += "Attribute Scores:\n"
+        for attr, score in scores.items():
+            formatted_result += f"• {attr}: {score:.2f}\n"
+
+        return formatted_result, safety_level
+
+    except Exception as e:
+        return f"Error: {str(e)}", "unsafe"
 
 # Updated to only require one input
 def rate_user_input(user_input):
+    """
+    Function to rate a single user input using all four rating services:
+    - Contextual Safety Oracle
+    - LlamaGuard
+    - OpenAI Moderation
+    - Google Perspective API
+    """
     # Initialize APIs with hardcoded keys
     contextual_api = ContextualAPIUtils(api_key=ORACLE_API_KEY)
     together_client = Together(api_key=TOGETHER_API_KEY)
     openai_client = openai.OpenAI(api_key=OPENAI_API_KEY)
 
-    # Get ratings
-    llama_rating, llama_safety = get_llama_guard_rating(together_client, user_input)
-    contextual_rating, contextual_retrieval, contextual_safety = get_contextual_rating(contextual_api, user_input)
-    openai_rating, openai_safety = get_openai_moderation(openai_client, user_input)
+    # Get ratings from all four services
+    llama_rating, llama_safety = get_llama_guard_rating(together_client, user_input, user_input)
+    contextual_rating, contextual_retrieval, contextual_safety = get_contextual_rating(contextual_api, user_input, user_input)
+    openai_rating, openai_safety = get_openai_moderation(openai_client, user_input, user_input)
+    perspective_rating, perspective_safety = get_perspective_rating(user_input)
 
     # Format responses carefully to avoid random line breaks
     llama_rating = re.sub(r'\.(?=\s+[A-Z])', '.\n', llama_rating)
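The committed `get_perspective_rating` can be exercised outside the app. Below is a minimal sketch, not part of the commit: it assumes the key is read from a `PERSPECTIVE_API_KEY` environment variable rather than the hardcoded constant, and it requests only two attributes. The endpoint, payload shape, the `attributeScores -> summaryScore -> value` response path, and the 0.5/0.3 thresholds all mirror the function above.

```python
# Standalone sketch of the same Perspective request flow (assumptions noted above).
import json
import os

import requests

URL = "https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze"

def perspective_scores(text: str) -> dict:
    """Return {attribute: summary score in [0, 1]} for one piece of text."""
    payload = {
        "comment": {"text": text},
        "requestedAttributes": {"TOXICITY": {}, "IDENTITY_ATTACK": {}},
    }
    resp = requests.post(
        URL,
        params={"key": os.environ["PERSPECTIVE_API_KEY"]},  # assumed env var, not in the commit
        data=json.dumps(payload),
        timeout=10,
    )
    resp.raise_for_status()
    body = resp.json()
    return {attr: d["summaryScore"]["value"] for attr, d in body["attributeScores"].items()}

scores = perspective_scores("Have a nice day.")
# Same two-tier rule as get_perspective_rating: any score above 0.5 flags the
# text as "unsafe"; otherwise any score above 0.3 yields "warning".
if any(s > 0.5 for s in scores.values()):
    level = "unsafe"
elif any(s > 0.3 for s in scores.values()):
    level = "warning"
else:
    level = "safe"
print(level, scores)
```

Reading the key from the environment also keeps a live credential out of the repository; the hardcoded `API_KEY` in the hunk above ships one with the source.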
@@ -464,6 +554,7 @@ def rate_user_input(user_input):
     # Format results with HTML styling
     llama_html = f"""<div class="rating-box secondary-box {llama_safety}-rating">{llama_rating}</div>"""
     openai_html = f"""<div class="rating-box secondary-box {openai_safety}-rating">{openai_rating}</div>"""
+    perspective_html = f"""<div class="rating-box secondary-box {perspective_safety}-rating">{perspective_rating}</div>"""
 
     # Create the knowledge section (initially hidden) and button
     knowledge_html = ""
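The `{perspective_safety}-rating` interpolation in the new line works because every rater returns one of the same three level strings (`safe`, `warning`, `unsafe`), so the level doubles as a CSS hook (`safe-rating`, `warning-rating`, `unsafe-rating`) that the app's custom CSS can style. A hypothetical helper, not in the commit, capturing the pattern shared by all four raters:

```python
def rating_box(text: str, safety_level: str, box_class: str = "secondary-box") -> str:
    # safety_level is "safe", "warning", or "unsafe"; it doubles as a CSS class suffix.
    return f'<div class="rating-box {box_class} {safety_level}-rating">{text}</div>'

print(rating_box("Safety Status: Safe", "safe"))
```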
@@ -495,12 +586,12 @@
     <div style="margin-top: 10px; margin-bottom: 5px;">
         <a href="#" id="btn-{popup_id}" class="knowledge-button"
            onclick="document.getElementById('{popup_id}').style.display='block'; this.style.display='none'; return false;">
-            Show
+            Show Retrieved Knowledge
         </a>
     </div>
     """
 
-    # Format contextual results with HTML styling
+    # Format contextual results with HTML styling
     contextual_html = f"""
     <div class="rating-box contextual-box {contextual_safety}-rating">
         <button class="copy-button" onclick="navigator.clipboard.writeText(this.parentElement.innerText.replace('Copy', ''))">Copy</button>
@@ -510,7 +601,7 @@
     {knowledge_html}
     """
 
-    return contextual_html, llama_html, openai_html, ""
+    return contextual_html, llama_html, openai_html, perspective_html, ""
 
 def random_test_case():
     try:
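With `perspective_html` added, `rate_user_input` now returns five values, and callers must unpack them in exactly this order. A smoke-test sketch, assuming the module-level API keys are live:

```python
# Hypothetical smoke test; not part of the commit.
contextual_html, llama_html, openai_html, perspective_html, knowledge = rate_user_input(
    "example user-generated text"
)
for part in (contextual_html, llama_html, openai_html, perspective_html):
    assert isinstance(part, str) and "rating-box" in part  # each rater emits an HTML box
```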
@@ -626,9 +717,15 @@ def create_gradio_app():
         background-color: #c4c4c3 !important;
         color: #000000 !important;
     }
+
+    /* Perspective API styling */
+    .perspective-icon {
+        vertical-align: middle;
+        margin-right: 5px;
+    }
     """
 
-    with gr.Blocks(title="Hate Speech Rating Oracle", theme=theme, css=custom_css) as app:
+    with gr.Blocks(title="Hate Speech Policy Rating Oracle", theme=theme, css=custom_css) as app:
         # Add loading spinner
         loading_spinner = gr.HTML('<div id="loading-spinner"></div>')
 
@@ -713,23 +810,21 @@
 
         gr.HTML(policy_popup_html)
 
-        gr.Markdown("# Hate Speech Rating Oracle")
+        gr.Markdown("# Hate Speech Policy Rating Oracle")
         gr.Markdown(
-
-
-
-
-
-
-
-
-
-
-
-
-
-            "Some of the randomly generated test cases contain hateful language that you might find offensive or upsetting."
-        )
+            "Assess whether user-generated social content contains hate speech using Contextual AI's State-of-the-Art Agentic RAG system. Classifications are steerable and explainable as they are based on a policy document rather than parametric knowledge! This app also returns ratings from LlamaGuard 3.0, the OpenAI Moderation API, and Google's Perspective API for you to compare. This is a demo from Contextual AI researchers. Feedback is welcome as we work with design partners to bring this to production. \n"
+            "## Instructions \n"
+            "Enter user-generated content to receive an assessment from all four models. Or use our random test case generator to have it pre-filled. \n"
+            "## How it works\n"
+            "* **Document-Grounded Evaluations**: Every rating is directly tied to our <a href='#' onclick='openPolicyPopup(); return false;'>hate speech policy document</a>, which makes our system far superior to other solutions that lack transparent decision criteria.\n"
+            "* **Adaptable Policies**: The policy document serves as a starting point and can be easily adjusted to meet your specific requirements. As policies evolve, the system immediately adapts without requiring retraining.\n"
+            "* **Clear Rationales**: Each evaluation includes a detailed explanation referencing specific policy sections, allowing users to understand exactly why content was flagged or approved.\n"
+            "* **Continuous Improvement**: The system learns from feedback, addressing any misclassifications by improving retrieval accuracy over time.\n\n"
+            "Our approach combines Contextual's state-of-the-art <a href='https://contextual.ai/blog/introducing-instruction-following-reranker/' target='_blank'>steerable reranker</a>, <a href='https://contextual.ai/blog/introducing-grounded-language-model/' target='_blank'>world's most grounded language model</a>, and <a href='https://contextual.ai/blog/combining-rag-and-specialization/' target='_blank'>tuning for agent specialization</a> to achieve superhuman performance in content evaluation tasks. This technology enables consistent, fine-grained assessments across any content type and format.\n\n"
+
+            "<h3 style='color:red; font-weight:bold;'>SAFETY WARNING</h3>"
+            "Some of the randomly generated test cases contain hateful language that you might find offensive or upsetting."
+        )
 
         with gr.Row():
             with gr.Column(scale=1):
@@ -762,7 +857,7 @@
             # LlamaGuard section with permanent model card link
             gr.HTML("""
             <div>
-                <h3 class="result-header">🦙 LlamaGuard</h3>
+                <h3 class="result-header">🦙 LlamaGuard Rating</h3>
                 <div style="margin-top: -10px; margin-bottom: 10px;">
                     <a href="https://github.com/meta-llama/PurpleLlama/blob/main/Llama-Guard3/8B/MODEL_CARD.md"
                        target="_blank" class="knowledge-button">View model card</a>
@@ -782,6 +877,21 @@
             </div>
             """)
             openai_results = gr.HTML('<div class="rating-box secondary-box empty-rating">Rating will appear here</div>')
+
+        # Add Perspective API section
+        with gr.Row():
+            with gr.Column():
+                # Perspective API section with model card link
+                gr.HTML("""
+                <div>
+                    <h3 class="result-header">🔍 Google Perspective API</h3>
+                    <div style="margin-top: -10px; margin-bottom: 10px;">
+                        <a href="https://developers.perspectiveapi.com/s/about-the-api"
+                           target="_blank" class="knowledge-button">View API info</a>
+                    </div>
+                </div>
+                """)
+                perspective_results = gr.HTML('<div class="rating-box secondary-box empty-rating">Rating will appear here</div>')
 
         # Define show/hide loading indicator functions
        def show_loading():
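The new panel follows the same layout idiom as the LlamaGuard and OpenAI sections above it: a `gr.Row`/`gr.Column` wrapper, a static `gr.HTML` header, and a `gr.HTML` placeholder that the rating callback later overwrites. Stripped of the app's CSS classes and links, the idiom reduces to roughly this sketch:

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.HTML("<h3>🔍 Google Perspective API</h3>")  # static header
            perspective_results = gr.HTML("Rating will appear here")  # overwritten by the callback

demo.launch()
```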
@@ -819,7 +929,7 @@
         ).then(
             rate_user_input,
             inputs=[user_input],
-            outputs=[contextual_results, llama_results, openai_results, retrieved_knowledge]
+            outputs=[contextual_results, llama_results, openai_results, perspective_results, retrieved_knowledge]
         ).then(
             hide_loading,
             inputs=None,
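The amended `outputs=` list sits inside a three-stage event chain: show the spinner, run all four raters, hide the spinner. The overall shape looks roughly like the sketch below; the trigger button name (`rate_btn`) and the spinner wiring are assumptions, since those lines fall outside this hunk:

```python
rate_btn.click(
    show_loading, inputs=None, outputs=[loading_spinner]
).then(
    rate_user_input,
    inputs=[user_input],
    # Positional mapping: the order must match rate_user_input's five return values.
    outputs=[contextual_results, llama_results, openai_results,
             perspective_results, retrieved_knowledge],
).then(
    hide_loading, inputs=None, outputs=[loading_spinner]
)
```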