Complete Guide to Embedding AI in Your Python Admin Panel
Use cloud-based AI APIs from providers like OpenAI, Anthropic, Google, or Azure. This is the fastest way to get started and gives you access to the most capable models.
from openai import OpenAI
from flask import Flask, request, jsonify
app = Flask(__name__)
client = OpenAI(api_key="your-api-key-here")
class AIAdminAssistant:
def __init__(self):
self.conversation_history = []
def ask_ai(self, user_query, context=None):
"""Ask AI copilot a question with optional context"""
messages = [
{"role": "system", "content": """You are an AI assistant for the hx7 admin panel.
You help administrators manage apps, users, security, and operations.
You can execute commands, provide insights, and make recommendations."""}
]
# Add context about current admin state
if context:
messages.append({
"role": "system",
"content": f"Current system state: {context}"
})
# Add conversation history
messages.extend(self.conversation_history)
# Add user query
messages.append({"role": "user", "content": user_query})
# Call OpenAI API
        response = client.chat.completions.create(
model="gpt-4",
messages=messages,
temperature=0.7,
max_tokens=1000
)
ai_response = response.choices[0].message.content
# Save to history
self.conversation_history.append({"role": "user", "content": user_query})
self.conversation_history.append({"role": "assistant", "content": ai_response})
return ai_response
# Example usage
ai_assistant = AIAdminAssistant()
@app.route('/ai/query', methods=['POST'])
def ai_query():
data = request.json
query = data.get('query')
# Get system context
context = {
"active_users": 1543,
"apps_online": 8,
"security_alerts": 3
}
response = ai_assistant.ask_ai(query, context)
return jsonify({"response": response})
import anthropic
class ClaudeAdminAssistant:
def __init__(self, api_key):
self.client = anthropic.Anthropic(api_key=api_key)
def analyze_security_event(self, event_data):
"""AI analyzes security events and suggests actions"""
prompt = f"""Analyze this security event and provide:
1. Severity level (Low/Medium/High/Critical)
2. Potential threat type
3. Recommended actions
Event Data: {event_data}
"""
message = self.client.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=1024,
messages=[{"role": "user", "content": prompt}]
)
return message.content[0].text
def suggest_user_action(self, user_behavior):
"""AI suggests whether to ban, watch, or clear a user"""
prompt = f"""Based on this user behavior, suggest action:
User Activity:
- Failed login attempts: {user_behavior.get('failed_logins')}
- Suspicious IPs: {user_behavior.get('suspicious_ips')}
- Unusual usage pattern: {user_behavior.get('unusual_pattern')}
- Account age: {user_behavior.get('account_age')} days
Provide: Risk Score (0-100), Action (MONITOR/BAN/CLEAR), Reasoning
"""
message = self.client.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=512,
messages=[{"role": "user", "content": prompt}]
)
return message.content[0].text
Run AI models locally using libraries like scikit-learn, TensorFlow, or PyTorch. Best for privacy and cost control.
from sklearn.ensemble import IsolationForest
import numpy as np
import pickle
class AnomalyDetector:
def __init__(self):
self.model = IsolationForest(
contamination=0.1, # 10% of data is anomalous
random_state=42
)
self.is_trained = False
def train(self, historical_data):
"""Train on normal user behavior"""
# Features: login_frequency, failed_attempts, session_duration, etc.
features = np.array(historical_data)
self.model.fit(features)
self.is_trained = True
# Save model
with open('anomaly_model.pkl', 'wb') as f:
pickle.dump(self.model, f)
def detect_anomaly(self, user_behavior):
"""Detect if user behavior is suspicious"""
if not self.is_trained:
raise Exception("Model not trained yet")
features = np.array([user_behavior])
prediction = self.model.predict(features)
# -1 = anomaly, 1 = normal
is_suspicious = prediction[0] == -1
# Get anomaly score
score = self.model.score_samples(features)[0]
return {
"is_suspicious": is_suspicious,
"anomaly_score": abs(score),
"action": "INVESTIGATE" if is_suspicious else "NORMAL"
}
# Example usage
detector = AnomalyDetector()
# Train with historical data (run once)
historical_behaviors = [
[5, 0, 120, 3], # [logins_per_day, failed_attempts, avg_session_min, apps_used]
[4, 0, 95, 2],
[6, 1, 110, 4],
# ... more normal behavior data
]
detector.train(historical_behaviors)
# Detect suspicious user
new_user = [50, 25, 5, 1] # Suspicious: many logins, many failures, short sessions
result = detector.detect_anomaly(new_user)
print(result) # {"is_suspicious": True, "anomaly_score": 0.85, "action": "INVESTIGATE"}
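The train() method above persists the fitted model with pickle, but nothing reloads it yet. A minimal sketch of restoring the saved model at startup (assuming the same anomaly_model.pkl path), so the detector does not have to be retrained on every restart:
# Sketch: reload a previously trained model instead of retraining on every restart.
# Assumes the anomaly_model.pkl file written by AnomalyDetector.train() above.
import os
import pickle

def load_detector(path="anomaly_model.pkl"):
    detector = AnomalyDetector()
    if os.path.exists(path):
        with open(path, "rb") as f:
            detector.model = pickle.load(f)
        detector.is_trained = True
    return detector

detector = load_detector()  # falls back to an untrained detector if no saved model exists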
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
class ChurnPredictor:
def __init__(self):
self.model = RandomForestClassifier(n_estimators=100, random_state=42)
def train(self, user_data):
"""Train model to predict churn"""
df = pd.DataFrame(user_data)
# Features
X = df[[
'days_since_last_login',
'total_apps_owned',
'support_tickets',
'avg_session_duration',
'payment_failures'
]]
# Target: did_churn (0 or 1)
y = df['did_churn']
# Train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Train model
self.model.fit(X_train, y_train)
# Evaluate
accuracy = self.model.score(X_test, y_test)
print(f"Model accuracy: {accuracy:.2%}")
def predict_churn_risk(self, user_features):
"""Predict if user is likely to churn"""
features = pd.DataFrame([user_features])
# Get prediction probability
churn_probability = self.model.predict_proba(features)[0][1]
# Classify risk level
if churn_probability > 0.7:
risk_level = "HIGH"
elif churn_probability > 0.4:
risk_level = "MEDIUM"
else:
risk_level = "LOW"
return {
"churn_probability": churn_probability,
"risk_level": risk_level,
"recommended_action": self._get_retention_strategy(risk_level)
}
def _get_retention_strategy(self, risk_level):
strategies = {
"HIGH": "Send discount coupon + personal email from CEO",
"MEDIUM": "Offer feature upgrade or extended trial",
"LOW": "Continue normal engagement"
}
return strategies[risk_level]
# Example usage
predictor = ChurnPredictor()
# Train with historical data
user_data = [
{'days_since_last_login': 3, 'total_apps_owned': 5, 'support_tickets': 0,
'avg_session_duration': 45, 'payment_failures': 0, 'did_churn': 0},
{'days_since_last_login': 30, 'total_apps_owned': 1, 'support_tickets': 3,
'avg_session_duration': 5, 'payment_failures': 2, 'did_churn': 1},
# ... more data
]
predictor.train(user_data)
# Predict for new user
at_risk_user = {
'days_since_last_login': 15,
'total_apps_owned': 2,
'support_tickets': 1,
'avg_session_duration': 10,
'payment_failures': 1
}
result = predictor.predict_churn_risk(at_risk_user)
print(result)
Combine API-based AI for complex tasks with local ML for fast, privacy-sensitive operations.
class HybridAISystem:
def __init__(self, openai_key, claude_key):
        # API-based AI for complex reasoning (OpenAIAssistant and ClaudeAssistant are
        # assumed to be thin wrappers around the API clients shown earlier in this guide)
        self.gpt = OpenAIAssistant(openai_key)
        self.claude = ClaudeAssistant(claude_key)
        # Local ML for fast predictions (FraudDetector is analogous to the
        # FraudDetectionSystem defined later in this guide)
        self.anomaly_detector = AnomalyDetector()
        self.churn_predictor = ChurnPredictor()
        self.fraud_detector = FraudDetector()
def analyze_user(self, user_id, user_data):
"""Complete user analysis using hybrid approach"""
# FAST: Local ML for real-time checks (< 10ms)
anomaly_check = self.anomaly_detector.detect_anomaly(user_data['behavior'])
fraud_check = self.fraud_detector.check_transaction(user_data['last_purchase'])
churn_risk = self.churn_predictor.predict_churn_risk(user_data['metrics'])
# If suspicious, use powerful AI for deep analysis
if anomaly_check['is_suspicious'] or fraud_check['is_fraud']:
ai_analysis = self.claude.deep_security_analysis({
"user_id": user_id,
"anomaly_score": anomaly_check['anomaly_score'],
"fraud_indicators": fraud_check['indicators'],
"user_history": user_data['history']
})
return {
"risk_level": "HIGH",
"quick_checks": {
"anomaly": anomaly_check,
"fraud": fraud_check
},
"ai_analysis": ai_analysis,
"recommended_action": "MANUAL_REVIEW"
}
# Normal user: just return fast ML results
return {
"risk_level": "LOW",
"churn_risk": churn_risk,
"anomaly_check": anomaly_check,
"recommended_action": "NONE"
}
def smart_support_ticket(self, ticket):
"""Route tickets intelligently"""
        # Local ML: Quick categorization (ticket_classifier is assumed to be a small
        # classifier like the TF-IDF + Naive Bayes one in SmartTicketRouter below)
        category = self.ticket_classifier.classify(ticket['content'])
if category in ['simple', 'faq']:
# AI generates automatic response
response = self.gpt.generate_support_response(ticket)
return {"type": "AUTO_RESOLVE", "response": response}
else:
# Complex issue: AI analyzes and routes to human
analysis = self.claude.analyze_support_ticket(ticket)
return {
"type": "ESCALATE",
"analysis": analysis,
"route_to": analysis['suggested_team']
}
Use Local ML for: Real-time anomaly detection, fraud checks, spam filtering, simple classification
Use API AI for: Complex reasoning, natural language, code analysis, strategic recommendations
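That split can be expressed as a simple routing rule in code. A minimal sketch, where the task categories and the PII rule are illustrative assumptions rather than a fixed API:
# Illustrative routing rule for the local-ML vs. API-AI split described above.
LOCAL_ML_TASKS = {"anomaly_detection", "fraud_check", "spam_filter", "ticket_precheck"}
API_AI_TASKS = {"security_deep_dive", "support_response", "log_explanation", "strategy"}

def choose_backend(task_type, contains_pii=False):
    # Privacy-sensitive data stays on local models regardless of task complexity
    if contains_pii or task_type in LOCAL_ML_TASKS:
        return "local_ml"
    if task_type in API_AI_TASKS:
        return "api_ai"
    return "api_ai"  # default to the more capable backend for unknown task types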
Run powerful open-source models like Llama, Mistral, or Phi locally on your servers.
import requests
import json
class LocalLLM:
def __init__(self, model="llama2"):
self.base_url = "http://localhost:11434"
self.model = model
def chat(self, prompt, system_prompt=None):
"""Chat with locally hosted LLM"""
messages = []
if system_prompt:
messages.append({"role": "system", "content": system_prompt})
messages.append({"role": "user", "content": prompt})
response = requests.post(
f"{self.base_url}/api/chat",
json={
"model": self.model,
"messages": messages,
"stream": False
}
)
return response.json()['message']['content']
def analyze_logs(self, log_entries):
"""AI analyzes error logs"""
system_prompt = """You are a system administrator AI.
Analyze log entries and identify root causes of issues."""
prompt = f"Analyze these logs and explain what went wrong:\n\n{log_entries}"
return self.chat(prompt, system_prompt)
# Setup: Install Ollama, then run: ollama pull llama2
llm = LocalLLM(model="llama2")
# Example usage
logs = """
[ERROR] Database connection timeout after 30s
[ERROR] Failed to connect to 192.168.1.50:5432
[WARN] Connection pool exhausted
[ERROR] Database connection timeout after 30s
"""
analysis = llm.analyze_logs(logs)
print(analysis)
# Output: "The database server at 192.168.1.50 is unreachable or unresponsive.
# The connection pool is exhausted, suggesting high load or connection leaks..."
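Before wiring this into the admin panel, it is worth checking that the Ollama server is running and the model has actually been pulled. A small helper, assuming Ollama's standard /api/tags listing endpoint:
# Sketch: verify the local Ollama server is reachable and the model is available.
# Assumes Ollama's /api/tags endpoint, which lists locally pulled models.
import requests

def ollama_model_available(model="llama2", base_url="http://localhost:11434"):
    try:
        resp = requests.get(f"{base_url}/api/tags", timeout=5)
        resp.raise_for_status()
    except requests.RequestException:
        return False
    names = [m.get("name", "") for m in resp.json().get("models", [])]
    return any(name.startswith(model) for name in names)

if not ollama_model_available("llama2"):
    print("Ollama is not running or llama2 has not been pulled (run: ollama pull llama2)")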
from transformers import pipeline
class LocalNLPModels:
def __init__(self):
# Sentiment analysis
self.sentiment = pipeline("sentiment-analysis")
# Text classification
self.classifier = pipeline("zero-shot-classification")
# Named entity recognition
        self.ner = pipeline("ner", aggregation_strategy="simple")
def analyze_user_feedback(self, feedback_text):
"""Analyze user feedback sentiment and topics"""
# Sentiment
sentiment = self.sentiment(feedback_text)[0]
# Classify topic
topics = ["bug report", "feature request", "complaint", "praise"]
classification = self.classifier(feedback_text, topics)
# Extract entities (product names, features mentioned)
entities = self.ner(feedback_text)
return {
"sentiment": sentiment['label'],
"confidence": sentiment['score'],
"topic": classification['labels'][0],
"entities": entities
}
# Example usage
nlp = LocalNLPModels()
feedback = "The DDoS tool keeps crashing on Windows 11. Very frustrating!"
result = nlp.analyze_user_feedback(feedback)
print(result)
# {
# "sentiment": "NEGATIVE",
# "confidence": 0.95,
# "topic": "bug report",
# "entities": [{"entity": "PRODUCT", "word": "DDoS tool"}, ...]
# }
CPU Only: Mistral 7B, Phi-2 (4-8GB RAM)
GPU Recommended: Llama 2 13B, Mixtral (16-32GB VRAM)
High-End GPU: Llama 2 70B (80GB+ VRAM, or less with 4-bit quantization)
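If you want the panel to pick a sensible default model automatically, a rough sketch based on available system memory follows. The thresholds and model tags are illustrative assumptions, and psutil is an extra dependency not in the requirements list below:
# Rough sketch: pick an Ollama model tag based on available RAM.
# Thresholds and tags are illustrative; psutil is an additional dependency.
import psutil

def pick_local_model():
    ram_gb = psutil.virtual_memory().total / 1e9
    if ram_gb < 8:
        return "phi"          # small CPU-only model
    if ram_gb < 32:
        return "mistral"      # 7B-class model
    return "llama2:13b"       # larger model when plenty of memory (or a GPU) is available

llm = LocalLLM(model=pick_local_model())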
Use enterprise platforms like Azure AI, Google Vertex AI, or AWS SageMaker for production-grade AI infrastructure.
from azure.ai.textanalytics import TextAnalyticsClient
from azure.core.credentials import AzureKeyCredential
import boto3
class EnterpriseAI:
def __init__(self):
# Azure AI
self.azure_text = TextAnalyticsClient(
endpoint="https://your-resource.cognitiveservices.azure.com/",
credential=AzureKeyCredential("your-key")
)
# AWS services
self.comprehend = boto3.client('comprehend', region_name='us-east-1')
self.rekognition = boto3.client('rekognition', region_name='us-east-1')
def analyze_text_azure(self, text):
"""Enterprise-grade text analysis with Azure"""
documents = [{"id": "1", "text": text}]
# Sentiment analysis
sentiment_result = self.azure_text.analyze_sentiment(documents)[0]
# Key phrase extraction
key_phrases = self.azure_text.extract_key_phrases(documents)[0]
# Entity recognition
entities = self.azure_text.recognize_entities(documents)[0]
return {
"sentiment": sentiment_result.sentiment,
"confidence_scores": {
"positive": sentiment_result.confidence_scores.positive,
"neutral": sentiment_result.confidence_scores.neutral,
"negative": sentiment_result.confidence_scores.negative
},
"key_phrases": [phrase.text for phrase in key_phrases.key_phrases],
"entities": [{"text": e.text, "category": e.category} for e in entities.entities]
}
def detect_pii(self, text):
"""Detect personally identifiable information (PII)"""
response = self.comprehend.detect_pii_entities(
Text=text,
LanguageCode='en'
)
pii_found = []
for entity in response['Entities']:
pii_found.append({
"type": entity['Type'],
"text": text[entity['BeginOffset']:entity['EndOffset']],
"confidence": entity['Score']
})
return {
"has_pii": len(pii_found) > 0,
"pii_entities": pii_found
}
def analyze_user_screenshot(self, image_bytes):
"""Analyze screenshots for content moderation"""
response = self.rekognition.detect_moderation_labels(
Image={'Bytes': image_bytes},
MinConfidence=80
)
inappropriate_content = []
for label in response['ModerationLabels']:
inappropriate_content.append({
"label": label['Name'],
"confidence": label['Confidence'],
"parent_label": label.get('ParentName', 'None')
})
return {
"is_safe": len(inappropriate_content) == 0,
"flagged_content": inappropriate_content
}
# Example usage
ai = EnterpriseAI()
# Analyze support ticket
ticket_text = "I'm having trouble with my payment. Card ending in 4532."
analysis = ai.analyze_text_azure(ticket_text)
pii_check = ai.detect_pii(ticket_text)
print(f"Sentiment: {analysis['sentiment']}")
print(f"PII Detected: {pii_check['has_pii']}")
from flask import Flask, render_template, jsonify, request
from openai import OpenAI
from datetime import datetime, timedelta
app = Flask(__name__)
class AISecurityDashboard:
def __init__(self):
openai.api_key = "your-key"
self.anomaly_detector = AnomalyDetector()
def get_security_summary(self):
"""Generate AI summary of security events"""
# Get recent security events
events = self.get_recent_events()
# Create prompt for AI
prompt = f"""Analyze these security events from the last 24 hours and provide:
1. Overall security status (Good/Warning/Critical)
2. Top 3 threats detected
3. Recommended immediate actions
Events:
{events}
Be concise and actionable."""
        response = self.openai_client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a cybersecurity analyst."},
{"role": "user", "content": prompt}
]
)
return response.choices[0].message.content
def get_recent_events(self):
# Query your database for recent security events
return """
- 15 failed login attempts from IP 192.168.1.100
- 3 unusual API calls from new user account
- 1 malware detection in uploaded file
- 8 successful logins from Russia (VPN detected)
"""
def check_user_risk(self, user_id):
"""Real-time user risk assessment"""
user_data = self.get_user_data(user_id)
# Local ML for fast check
ml_result = self.anomaly_detector.detect_anomaly(user_data['features'])
if ml_result['is_suspicious']:
# Deep AI analysis for suspicious users
ai_analysis = self.ai_deep_analysis(user_data)
return {
"risk_level": "HIGH",
"ml_score": ml_result['anomaly_score'],
"ai_analysis": ai_analysis,
"auto_action": "TEMPORARY_SUSPENSION"
}
return {"risk_level": "LOW", "ml_score": ml_result['anomaly_score']}
@app.route('/dashboard/security')
def security_dashboard():
dashboard = AISecurityDashboard()
summary = dashboard.get_security_summary()
return render_template('security.html', summary=summary)
@app.route('/api/user/risk/<int:user_id>')
def user_risk(user_id):
dashboard = AISecurityDashboard()
risk = dashboard.check_user_risk(user_id)
return jsonify(risk)
import anthropic
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
class SmartTicketRouter:
def __init__(self):
self.claude = anthropic.Anthropic(api_key="your-key")
self.quick_classifier = None
self.train_quick_classifier()
def train_quick_classifier(self):
"""Train fast local classifier for common tickets"""
# Training data
tickets = [
"I forgot my password", "Can't login to my account",
"App keeps crashing", "Getting error code 500",
"Want to cancel subscription", "Refund request",
"Feature doesn't work", "How do I use X feature"
]
labels = ["auth", "auth", "technical", "technical",
"billing", "billing", "support", "support"]
self.vectorizer = TfidfVectorizer()
X = self.vectorizer.fit_transform(tickets)
self.quick_classifier = MultinomialNB()
self.quick_classifier.fit(X, labels)
def route_ticket(self, ticket_content, user_info):
"""Intelligently route support tickets"""
# Fast local classification
X = self.vectorizer.transform([ticket_content])
quick_category = self.quick_classifier.predict(X)[0]
# Simple categories get auto-responses
if quick_category == "auth" and "password" in ticket_content.lower():
return {
"route": "AUTO_RESOLVE",
"response": "Click 'Forgot Password' on the login page.",
"category": "auth"
}
# Complex tickets get AI analysis
message = self.claude.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=500,
messages=[{
"role": "user",
"content": f"""Analyze this support ticket and provide:
1. Category (technical/billing/abuse/feature_request)
2. Priority (low/medium/high/urgent)
3. Suggested team to handle it
4. Brief summary for the team
Ticket: {ticket_content}
User: {user_info}
"""
}]
)
ai_response = message.content[0].text
return {
"route": "HUMAN_AGENT",
"category": quick_category,
"ai_analysis": ai_response,
"priority": self.extract_priority(ai_response)
}
def extract_priority(self, ai_text):
"""Extract priority from AI response"""
ai_text_lower = ai_text.lower()
if "urgent" in ai_text_lower:
return "urgent"
elif "high" in ai_text_lower:
return "high"
elif "medium" in ai_text_lower:
return "medium"
return "low"
# Usage
router = SmartTicketRouter()
ticket = "Your DDoS tool deleted all my files! I lost important data!"
result = router.route_ticket(ticket, user_info={"tier": "premium"})
print(f"Route: {result['route']}")
print(f"Priority: {result.get('priority')}")
print(f"Analysis: {result.get('ai_analysis')}")
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from openai import OpenAI
class FraudDetectionSystem:
def __init__(self):
self.model = RandomForestClassifier(n_estimators=100)
self.train_model()
openai.api_key = "your-key"
def train_model(self):
"""Train fraud detection model"""
# Features: transaction_amount, account_age_days, failed_attempts_24h,
# different_ips_24h, unusual_location, velocity_score
X_train = np.array([
[50, 500, 0, 1, 0, 0.2], # Normal
[100, 730, 1, 1, 0, 0.3], # Normal
[500, 2, 15, 8, 1, 0.9], # Fraud
[1000, 1, 25, 12, 1, 0.95], # Fraud
# ... more training data
])
y_train = np.array([0, 0, 1, 1]) # 0=normal, 1=fraud
self.model.fit(X_train, y_train)
def check_transaction(self, transaction):
"""Multi-layer fraud detection"""
# Layer 1: Fast ML model (< 10ms)
features = np.array([[
transaction['amount'],
transaction['account_age_days'],
transaction['failed_attempts_24h'],
transaction['different_ips_24h'],
int(transaction['unusual_location']),
transaction['velocity_score']
]])
ml_prediction = self.model.predict(features)[0]
ml_probability = self.model.predict_proba(features)[0][1]
# Layer 2: If ML is suspicious, use AI for deep analysis
if ml_prediction == 1 or ml_probability > 0.6:
ai_analysis = self.ai_fraud_analysis(transaction)
return {
"is_fraud": True,
"confidence": ml_probability,
"ml_score": ml_probability,
"ai_analysis": ai_analysis,
"action": "BLOCK_AND_REVIEW"
}
# Layer 3: Medium risk gets rule-based checks
if 0.3 < ml_probability < 0.6:
return {
"is_fraud": False,
"confidence": ml_probability,
"action": "ADDITIONAL_VERIFICATION",
"require_2fa": True
}
# Low risk: approve
return {
"is_fraud": False,
"confidence": 1 - ml_probability,
"action": "APPROVE"
}
def ai_fraud_analysis(self, transaction):
"""Use GPT-4 for sophisticated fraud pattern analysis"""
prompt = f"""Analyze this potentially fraudulent transaction:
Amount: ${transaction['amount']}
Account age: {transaction['account_age_days']} days
Recent failed attempts: {transaction['failed_attempts_24h']}
Different IPs in 24h: {transaction['different_ips_24h']}
Location unusual: {transaction['unusual_location']}
User history: {transaction.get('history', 'New user')}
Provide:
1. Fraud likelihood (0-100%)
2. Specific indicators
3. Recommended action
"""
        response = self.openai_client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a fraud detection expert."},
{"role": "user", "content": prompt}
]
)
return response.choices[0].message.content
# Usage
fraud_system = FraudDetectionSystem()
suspicious_transaction = {
"amount": 999,
"account_age_days": 1,
"failed_attempts_24h": 18,
"different_ips_24h": 10,
"unusual_location": True,
"velocity_score": 0.92,
"history": "Account created today, immediately purchased premium"
}
result = fraud_system.check_transaction(suspicious_transaction)
print(f"Fraud detected: {result['is_fraud']}")
print(f"Action: {result['action']}")
Tier 1 - Real-time Operations (Local ML): anomaly detection, fraud scoring, spam filtering, and quick ticket classification that has to return in milliseconds.
Tier 2 - Complex Analysis (API AI): security event deep-dives, support ticket triage, log explanations, and strategic recommendations via the OpenAI or Claude APIs.
Tier 3 - Specialized Tasks: enterprise services (Azure AI Language, AWS Comprehend, Rekognition) for PII detection, content moderation, and compliance-grade text analytics.
# API-based AI
openai==1.12.0
anthropic==0.18.1
# Local ML
scikit-learn==1.4.0
numpy==1.26.3
pandas==2.2.0
joblib==1.3.2
# Deep Learning (optional)
torch==2.2.0
transformers==4.37.2
# NLP
nltk==3.8.1
spacy==3.7.2
# Web framework
flask==3.0.1
flask-cors==4.0.0
# Database
sqlalchemy==2.0.25
pymongo==4.6.1
# Caching
redis==5.0.1
# Enterprise AI (optional)
azure-ai-textanalytics==5.3.0
boto3==1.34.34
google-cloud-aiplatform==1.42.1
# Utilities
python-dotenv==1.0.1
requests==2.31.0
# Create virtual environment
python -m venv venv
source venv/bin/activate # On Windows: venv\Scripts\activate
# Install dependencies
pip install -r requirements.txt
# Download NLP models
python -m spacy download en_core_web_sm
python -m nltk.downloader popular
# Set environment variables
export OPENAI_API_KEY="your-key"
export ANTHROPIC_API_KEY="your-key"
export AWS_ACCESS_KEY_ID="your-key"
export AWS_SECRET_ACCESS_KEY="your-secret"
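Since python-dotenv is already in the requirements, the keys exported above can also live in a .env file and be loaded at startup instead of being hard-coded in the examples. A minimal sketch using the same variable names:
# Sketch: read API keys from the environment (or a .env file) rather than source code.
import os
from dotenv import load_dotenv
from openai import OpenAI
import anthropic

load_dotenv()  # picks up a .env file if one exists

openai_client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
claude_client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])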
| Method | Setup Cost | Monthly Cost | Best For |
|---|---|---|---|
| OpenAI API | $0 | $50-500 | Quick start, general AI |
| Claude API | $0 | $50-500 | Security analysis, reasoning |
| Local ML | $500-2000 | $0 | High volume, privacy-sensitive |
| Self-hosted LLM | $3000-10000 | $200-800 | Complete control, offline |
| Hybrid Approach | $500-2000 | $100-300 | Optimal balance ⭐ |
| Enterprise AI | $1000-5000 | $500-5000 | Large scale, compliance needs |
Scenario 1 - Pure API:
• 500 AI queries/day × $0.01 = $150/month
Scenario 2 - Hybrid (Recommended):
• Local ML: 5000 checks/day = Free
• API for 50 complex cases/day × $0.02 = $30/month
• Total: $30/month + $1000 setup
ROI: At these volumes the hybrid approach saves roughly $120/month in API costs, so a $500-1,000 setup pays for itself in about 4-9 months.
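The arithmetic behind those numbers, as a small helper you can adapt. The per-call prices and daily volumes are the same illustrative assumptions used above:
# Sketch of the cost comparison above; prices and volumes are illustrative.
def monthly_api_cost(calls_per_day, price_per_call, days=30):
    return calls_per_day * price_per_call * days

pure_api = monthly_api_cost(500, 0.01)             # $150/month, every check hits the API
hybrid = monthly_api_cost(50, 0.02)                # $30/month, local ML handles the rest
setup_cost = 1000
payback_months = setup_cost / (pure_api - hybrid)  # ~8.3 months at these volumes

print(pure_api, hybrid, round(payback_months, 1))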
# Step 1: Install dependencies (1 minute)
# pip install openai scikit-learn flask
# Step 2: Create your first AI-powered endpoint (5 minutes)
from flask import Flask, request, jsonify
from openai import OpenAI
app = Flask(__name__)
openai.api_key = "sk-your-key-here"
@app.route('/ai/analyze-user', methods=['POST'])
def analyze_user():
"""AI analyzes user behavior and suggests action"""
data = request.json
prompt = f"""Analyze this user and suggest action (APPROVE/MONITOR/BAN):
Failed logins: {data['failed_logins']}
Account age: {data['account_age_days']} days
Apps owned: {data['apps_owned']}
Recent behavior: {data['behavior']}
Respond in JSON format with: {{"action": "...", "reason": "...", "risk_score": 0-100}}
"""
    response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}],
temperature=0.3
)
return jsonify({"ai_response": response.choices[0].message.content})
# Step 3: Test it! (1 minute)
if __name__ == '__main__':
app.run(debug=True, port=5000)
# Test with curl:
# curl -X POST http://localhost:5000/ai/analyze-user \
# -H "Content-Type: application/json" \
# -d '{"failed_logins": 10, "account_age_days": 2, "apps_owned": 0, "behavior": "Trying multiple accounts"}'