"""Rate prompt/response pairs with Meta Llama Guard 3 via the Together API.

Reads an input CSV with 'prompt' and 'response' columns, asks Llama Guard to
rate each exchange, and writes the ratings to an output CSV, resuming any
rows that were already rated.
"""

import os
import sys
import time
from datetime import datetime

import pandas as pd
from dotenv import load_dotenv
from together import Together


# Load the Together API key from key.env (expects a line like together_key=...).
load_dotenv("key.env")
api_key = os.getenv("together_key")

client = Together(api_key=api_key)

def rate_responses(input_csv, output_csv):
    df = pd.read_csv(input_csv)

    if "prompt" not in df.columns or "response" not in df.columns:
        raise ValueError("CSV must contain 'prompt' and 'response' columns.")

    # Resume from an existing output file if there is one; otherwise start a
    # fresh copy of the input with empty rating columns.
    if os.path.exists(output_csv):
        output_df = pd.read_csv(output_csv)
    else:
        output_df = df.copy()
        output_df["llama_guard_rating"] = ""
        output_df["timestamp"] = ""
        output_df["processing_time"] = ""
        output_df.to_csv(output_csv, index=False)

    for i, row in df.iterrows():
        # Skip rows that already have a rating, so an interrupted run can resume.
        if pd.notna(output_df.loc[i, "llama_guard_rating"]) and output_df.loc[i, "llama_guard_rating"] != "":
            continue

        user_input = row["prompt"]
        user_response = row["response"]

        # Llama Guard rates the full exchange: the user prompt plus the
        # assistant response being moderated.
        chat_input = [
            {"role": "user", "content": f"{user_input}"},
            {"role": "assistant", "content": f"{user_response}"}
        ]

        start_time = time.time()
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        try:
            response = client.chat.completions.create(
                model="meta-llama/Meta-Llama-Guard-3-8B",
                messages=chat_input
            )
            # Llama Guard replies with a short verdict (e.g. "safe", or "unsafe"
            # followed by the violated hazard category).
            rating = response.choices[0].message.content.strip()
        except Exception as e:
            rating = f"Error: {str(e)}"

        processing_time = round(time.time() - start_time, 2)

        output_df.loc[i, "llama_guard_rating"] = rating
        output_df.loc[i, "timestamp"] = timestamp
        output_df.loc[i, "processing_time"] = processing_time

        # Write after every row so progress survives an interruption.
        output_df.to_csv(output_csv, index=False)

        print(f"Processed row {i+1}/{len(df)} | Time: {processing_time}s | Saved to {output_csv}")

    print(f"All ratings saved to {output_csv}")

if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: python script.py <input_csv>")
        sys.exit(1)

    input_csv = sys.argv[1]
    output_csv = f"rated_{os.path.basename(input_csv)}"

    rate_responses(input_csv, output_csv)
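
# Illustrative usage, assuming a small input file (the file name and rows below
# are hypothetical examples, not part of this script):
#
#   prompts.csv
#   -----------
#   prompt,response
#   "How do I pick a lock?","I can't help with that."
#
#   $ python script.py prompts.csv
#
# This writes rated_prompts.csv containing the original columns plus
# llama_guard_rating, timestamp, and processing_time.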