from openai import OpenAI

client = OpenAI(
    api_key="sk-voidai-your_key_here",
    base_url="https://api.voidai.app/v1"
)

response = client.moderations.create(
    input="This is a friendly message about programming."
)

result = response.results[0]
print(f"Flagged: {result.flagged}")

if result.flagged:
    # The SDK returns pydantic models, so convert them to dicts before iterating
    for category, flagged in result.categories.model_dump().items():
        if flagged:
            score = result.category_scores.model_dump()[category]
            print(f"  {category}: {score:.4f}")
texts = [
    "Hello, how are you today?",
    "I love learning new programming languages!",
    "Let's build something amazing together."
]

response = client.moderations.create(input=texts)

for i, result in enumerate(response.results):
    print(f"Text {i}: {'Flagged' if result.flagged else 'OK'}")
from openai import OpenAI

client = OpenAI(
    api_key="sk-voidai-your_key_here",
    base_url="https://api.voidai.app/v1"
)

def is_content_safe(text, threshold=0.5):
    """Check if content is safe based on moderation scores."""
    response = client.moderations.create(input=text)
    result = response.results[0]

    if result.flagged:
        return False

    # Check whether any category score exceeds the threshold
    for category, score in result.category_scores.model_dump().items():
        if score > threshold:
            return False

    return True

def safe_chat(user_message):
    """Chat with content filtering on both input and output."""
    # Check the user's input before sending it to the model
    if not is_content_safe(user_message):
        return "I'm sorry, I can't respond to that message."

    # Get the AI response
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": user_message}]
    )
    ai_message = response.choices[0].message.content

    # Check the AI's output before returning it
    if not is_content_safe(ai_message):
        return "I generated an inappropriate response. Please try again."

    return ai_message

# Usage
result = safe_chat("Tell me about machine learning")
print(result)
The moderation API is designed to help filter content but should not be the only safety measure in your application. Consider implementing additional safeguards for production systems.
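As one illustrative safeguard, the sketch below wraps the moderation check so that it fails closed: any API error rejects the content rather than letting it through, and flagged categories are logged for later review. The function name `is_content_safe_strict` and the logging setup are assumptions for this example, not part of the API; `client` is the instance created above.

import logging

logger = logging.getLogger("moderation")

def is_content_safe_strict(text, threshold=0.5):
    """Fail-closed moderation check: reject content on errors and log flags."""
    try:
        response = client.moderations.create(input=text)
    except Exception as exc:
        # If the moderation request itself fails, refuse the content
        # rather than risk passing something unchecked.
        logger.warning("Moderation request failed; rejecting content: %s", exc)
        return False

    result = response.results[0]

    if result.flagged:
        # Record which categories fired so flagged inputs can be reviewed later
        flagged = [c for c, v in result.categories.model_dump().items() if v]
        logger.info("Content flagged in categories: %s", flagged)
        return False

    # Also reject near-misses whose scores exceed the threshold
    return all(score <= threshold
               for score in result.category_scores.model_dump().values())

Pair a check like this with application-level measures such as human review of logged flags; the moderation endpoint alone cannot catch everything.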