Quick & Easy Ways to Add AI to Your Python Admin Panel
import openai

client = openai.OpenAI(api_key="sk-your-api-key-here")

# Simple way - get an AI response
user_input = "Ban all users from Russia who failed login 5+ times"
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are an admin panel AI assistant. Analyze commands and provide actions."},
        {"role": "user", "content": user_input}
    ]
)
ai_answer = response.choices[0].message.content
print(ai_answer)
# Output: "I'll ban users matching: country='Russia' AND failed_logins >= 5. Execute query: UPDATE users SET banned=1 WHERE..."
# Use in admin panel
def analyze_user_behavior(user_data):
    prompt = f"Analyze this user: {user_data}. Should I ban, monitor, or approve?"
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",  # Cheaper than GPT-4
        messages=[{"role": "user", "content": prompt}]
    )
    return response.choices[0].message.content

# Example
user = {"failed_logins": 15, "account_age": 1, "suspicious_ips": 8}
decision = analyze_user_behavior(user)
print(decision)  # AI recommends an action
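The helpers above return free-form text, which an admin panel can display but not easily act on. One way to get a machine-readable verdict instead is OpenAI's JSON mode, sketched below assuming the `client` from above is in scope; `recommend_action` and the BAN/MONITOR/APPROVE labels are illustrative choices, not part of the API.

import json

def recommend_action(user_data):
    """Ask the model for a JSON verdict so the panel can branch on it."""
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        # JSON mode: the model is constrained to emit valid JSON
        response_format={"type": "json_object"},
        messages=[{
            "role": "user",
            "content": (
                f"User data: {user_data}. "
                'Reply as JSON: {"action": "BAN" or "MONITOR" or "APPROVE", "reason": "..."}'
            )
        }]
    )
    verdict = json.loads(response.choices[0].message.content)
    return verdict.get("action", "MONITOR"), verdict.get("reason", "")

# Example: branch on the parsed action instead of eyeballing raw text
action, reason = recommend_action({"failed_logins": 15, "account_age": 1})
if action == "BAN":
    print(f"Flagged for ban: {reason}")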
import anthropic
client = anthropic.Anthropic(api_key="sk-ant-your-key")
# Simple usage
user_activity = "User tried 50 different passwords in 2 minutes from 10 different IPs"
response = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=[
        {"role": "user", "content": f"Analyze this activity and tell me if it's an attack: {user_activity}"}
    ]
)
ai_response = response.content[0].text
print(ai_response)
# Output: "This is clearly a brute force attack. Immediate actions: 1. Ban user, 2. Block IPs..."
# Use for security analysis
def analyze_security_event(event_data):
    prompt = f"""Security Event Analysis:
    Event: {event_data['type']}
    User: {event_data['user_id']}
    IP: {event_data['ip']}
    Details: {event_data['details']}
    Provide: Risk Level (Low/Medium/High/Critical), Threat Type, Recommended Action"""
    message = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=512,
        messages=[{"role": "user", "content": prompt}]
    )
    return message.content[0].text
# Example
event = {
    "type": "multiple_failed_logins",
    "user_id": "user_123",
    "ip": "192.168.1.100",
    "details": "15 failed attempts in 5 minutes"
}
analysis = analyze_security_event(event)
print(analysis)
import google.generativeai as genai
genai.configure(api_key="your-google-api-key")
model = genai.GenerativeModel('gemini-pro')
# Super simple usage
user_query = "Analyze failed login attempts and suggest security rules"
response = model.generate_content(user_query)
print(response.text)
# Use in admin panel
def get_ai_recommendation(data):
    prompt = f"User data: {data}. Recommend action: BAN, MONITOR, or APPROVE?"
    response = model.generate_content(prompt)
    return response.text
# Example
user_data = "New user, 20 failed logins, VPN detected, suspicious country"
action = get_ai_recommendation(user_data)
print(action) # AI suggests BAN
# Multi-modal: Analyze screenshots!
def analyze_screenshot(image_path):
    import PIL.Image
    img = PIL.Image.open(image_path)
    # Image input needs a multimodal model; plain 'gemini-pro' is text-only
    vision_model = genai.GenerativeModel('gemini-1.5-flash')
    response = vision_model.generate_content([
        "What security issues do you see in this admin panel screenshot?",
        img
    ])
    return response.text
import ollama
# Simple usage - runs on YOUR computer!
response = ollama.chat(
    model='llama2',
    messages=[
        {'role': 'user', 'content': 'Analyze this user activity: 50 failed logins in 1 hour'}
    ]
)
print(response['message']['content'])
# Use in admin panel
def analyze_locally(user_data):
    prompt = f"""User Analysis:
    Failed Logins: {user_data['failed_logins']}
    Account Age: {user_data['account_age']} days
    Suspicious IPs: {user_data['suspicious_ips']}
    Should I: BAN, MONITOR, or APPROVE? Explain briefly."""
    response = ollama.chat(
        model='llama2',  # or 'mistral', 'codellama', 'phi'
        messages=[{'role': 'user', 'content': prompt}]
    )
    return response['message']['content']
# Example
user = {"failed_logins": 25, "account_age": 1, "suspicious_ips": 12}
decision = analyze_locally(user)
print(decision)
# Stream responses for real-time feedback
def stream_analysis(query):
    stream = ollama.chat(
        model='llama2',
        messages=[{'role': 'user', 'content': query}],
        stream=True
    )
    for chunk in stream:
        print(chunk['message']['content'], end='', flush=True)
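Because Ollama runs on your own machine, the most common failure mode is simply that the local server isn't running when the admin panel calls it. A small sketch of a guard with an optional fallback; the hosted_fn hook and the reuse of analyze_user_behavior from the OpenAI section are illustrative assumptions, not something the ollama library provides.

def analyze_with_fallback(prompt, hosted_fn=None):
    """Try the local model first; optionally fall back to a hosted helper."""
    try:
        response = ollama.chat(
            model='llama2',
            messages=[{'role': 'user', 'content': prompt}]
        )
        return response['message']['content']
    except Exception as exc:  # e.g. connection refused when the Ollama server isn't running
        if hosted_fn is not None:
            return hosted_fn(prompt)
        return f"AI unavailable: {exc}"

# Example: fall back to the OpenAI helper defined earlier
result = analyze_with_fallback(
    "30 failed logins in 5 minutes - ban or monitor?",
    hosted_fn=analyze_user_behavior
)
print(result)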
import requests
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1"
headers = {"Authorization": "Bearer hf_your_token"}
def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
# Simple usage
user_input = "Analyze: User has 30 failed logins. Should I ban them?"
output = query({"inputs": user_input})
print(output[0]['generated_text'])
# Use in admin panel
def ai_security_check(event_description):
    prompt = f"Security Event: {event_description}\nRisk Level (Low/Medium/High):"
    result = query({
        "inputs": prompt,
        "parameters": {"max_new_tokens": 100, "temperature": 0.7}
    })
    return result[0]['generated_text']
# Example
event = "User from unknown country tried 100 passwords in 5 minutes"
analysis = ai_security_check(event)
print(analysis)
# Sentiment analysis (for support tickets)
sentiment_api = "https://api-inference.huggingface.co/models/distilbert-base-uncased-finetuned-sst-2-english"

def analyze_ticket_sentiment(ticket_text):
    # Post to the sentiment model, not the text-generation API_URL above
    response = requests.post(sentiment_api, headers=headers, json={"inputs": ticket_text})
    return response.json()[0]  # Returns: [{'label': 'NEGATIVE', 'score': 0.95}, ...]

ticket = "This app is terrible! It crashes every time!"
sentiment = analyze_ticket_sentiment(ticket)
print(f"Sentiment: {sentiment[0]['label']}")  # NEGATIVE
import cohere
co = cohere.Client('your-api-key')
# Simple usage
response = co.generate(
    prompt="Analyze this user behavior and suggest action: User tried 40 passwords in 2 minutes",
    max_tokens=200
)
print(response.generations[0].text)
# Use in admin panel
def classify_security_event(event_text):
    # classify expects ClassifyExample objects and at least two examples per label
    response = co.classify(
        model='embed-english-v2.0',
        inputs=[event_text],
        examples=[
            cohere.ClassifyExample(text="50 failed logins from same IP", label="attack"),
            cohere.ClassifyExample(text="Brute force attempt detected", label="attack"),
            cohere.ClassifyExample(text="User logged in from new device", label="suspicious"),
            cohere.ClassifyExample(text="Login at unusual hour from new country", label="suspicious"),
            cohere.ClassifyExample(text="Normal app usage", label="normal"),
            cohere.ClassifyExample(text="User browsed dashboard and logged out", label="normal"),
        ]
    )
    return response.classifications[0].prediction
# Example
event = "Multiple password attempts from different countries simultaneously"
classification = classify_security_event(event)
print(f"Event type: {classification}") # "attack"
# Semantic search in logs
def search_similar_events(query, event_log, top_k=3):
    import numpy as np
    response = co.embed(
        texts=[query] + event_log,
        model='embed-english-v2.0'
    )
    embeddings = np.array(response.embeddings)
    query_vec, event_vecs = embeddings[0], embeddings[1:]
    # Cosine similarity between the query and every log entry
    scores = event_vecs @ query_vec / (
        np.linalg.norm(event_vecs, axis=1) * np.linalg.norm(query_vec)
    )
    # Return the most similar events, best match first
    top = np.argsort(scores)[::-1][:top_k]
    return [(event_log[i], float(scores[i])) for i in top]
import replicate
# Simple usage - run any AI model!
output = replicate.run(
    "meta/llama-2-70b-chat",  # official model; the latest version is used automatically
    input={
        "prompt": "Analyze this security event: 100 failed logins in 1 minute. What should I do?",
        "max_tokens": 200
    }
)
print("".join(output))  # the model streams back a sequence of text chunks
# Use in admin panel
def analyze_with_ai(user_data):
    prompt = f"""Security Analysis Request:
    User ID: {user_data['id']}
    Failed Logins: {user_data['failed_logins']}
    Suspicious Activity: {user_data['activity']}
    Recommend: BAN / MONITOR / CLEAR"""
    output = replicate.run(
        "meta/llama-2-13b-chat",
        input={"prompt": prompt}
    )
    return "".join(output)
# Example
user = {
    "id": "user_123",
    "failed_logins": 45,
    "activity": "Multiple IPs, VPN usage, pattern matching bot"
}
recommendation = analyze_with_ai(user)
print(recommendation)
# Image analysis (screenshots, uploads)
def analyze_screenshot(image_url):
    output = replicate.run(
        "salesforce/blip",
        input={"image": image_url, "task": "image_captioning"}
    )
    return output
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage
client = MistralClient(api_key="your-api-key")
# Simple usage
messages = [
    ChatMessage(role="user", content="Analyze: User failed 30 logins. Ban or monitor?")
]
response = client.chat(
    model="mistral-medium",
    messages=messages
)
print(response.choices[0].message.content)
# Use in admin panel
def security_analysis(event_data):
    prompt = f"""Security Event Analysis:
    Type: {event_data['type']}
    User: {event_data['user']}
    Details: {event_data['details']}
    Provide: Risk Score (0-100), Action (BAN/MONITOR/ALLOW), Reason"""
    messages = [ChatMessage(role="user", content=prompt)]
    response = client.chat(
        model="mistral-small",  # or "mistral-medium" or "mistral-large"
        messages=messages
    )
    return response.choices[0].message.content
# Example
event = {
    "type": "brute_force_attempt",
    "user": "attacker_user",
    "details": "50 failed logins from Russian IP using Tor"
}
analysis = security_analysis(event)
print(analysis)
# Function calling (execute admin commands)
def ai_execute_command(natural_language_command):
    tools = [
        {
            "type": "function",
            "function": {
                "name": "ban_user",
                "description": "Ban a user from the system",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "user_id": {"type": "string"},
                        "reason": {"type": "string"}
                    }
                }
            }
        }
    ]
    messages = [ChatMessage(role="user", content=natural_language_command)]
    response = client.chat(
        model="mistral-large-latest",
        messages=messages,
        tools=tools,
        tool_choice="auto"
    )
    return response
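The call above only returns the raw response; the admin panel still has to read the tool call out of it and run the matching function itself. A minimal sketch of that dispatch step, assuming the mistralai client used above; ban_user here is a stand-in for your real admin logic.

import json

def ban_user(user_id, reason):
    # Stand-in for your actual admin-panel logic (database update, audit log, etc.)
    print(f"Banning {user_id}: {reason}")

response = ai_execute_command("Ban user_123 for repeated brute force attempts")
tool_calls = response.choices[0].message.tool_calls or []
for call in tool_calls:
    if call.function.name == "ban_user":
        args = json.loads(call.function.arguments)  # arguments arrive as a JSON string
        ban_user(args.get("user_id"), args.get("reason", "AI-recommended ban"))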
| Method | Difficulty | Cost/Month | Quality | Speed | Best Use |
|---|---|---|---|---|---|
| OpenAI GPT | ⭐ Easy | $15-300 | ⭐⭐⭐⭐⭐ | Fast | General AI tasks |
| Claude | ⭐ Easy | $20-200 | ⭐⭐⭐⭐⭐ | Fast | Security analysis |
| Google Gemini | ⭐ Easy | $0-10 | ⭐⭐⭐⭐ | Very Fast | FREE tier, images |
| Ollama | ⭐⭐ Medium | $0 FREE | ⭐⭐⭐ | Slower | Privacy, offline |
| Hugging Face | ⭐⭐ Medium | $0-50 | ⭐⭐⭐ | Slow (free) | Specific tasks |
| Cohere | ⭐ Easy | $0-60 | ⭐⭐⭐ | Fast | Classification |
| Replicate | ⭐ Easy | $10-100 | ⭐⭐⭐⭐ | Medium | Any AI model |
| Mistral AI | ⭐ Easy | $6-100 | ⭐⭐⭐⭐ | Very Fast | Fast & cheap |
Use: OpenAI GPT-3.5 Turbo + Ollama
Use: Ollama Only (100% Local)
Use: Google Gemini + Mistral AI
Use: Claude Sonnet 4 + OpenAI GPT-4
# For OpenAI (Recommended):
pip install openai
# For Claude:
pip install anthropic
# For Local/Free:
# Download from ollama.ai, then:
ollama pull llama2
# For Google Gemini:
pip install google-generativeai
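The remaining providers used above also ship pip-installable Python clients (the Hugging Face examples only need requests):

# For the other providers shown above:
pip install ollama      # Python client that talks to the local Ollama server
pip install requests    # used by the Hugging Face Inference API examples
pip install cohere
pip install replicate
pip install mistralai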
# ai_helper.py
import openai

class AdminAI:
    def __init__(self, api_key):
        self.client = openai.OpenAI(api_key=api_key)

    def analyze(self, data, question):
        """Simple AI analysis"""
        prompt = f"Data: {data}\n\nQuestion: {question}\n\nAnswer:"
        response = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}]
        )
        return response.choices[0].message.content
# Usage in your admin panel
ai = AdminAI("your-api-key")
# Example 1: Analyze user
user_data = {"failed_logins": 30, "account_age": 2}
result = ai.analyze(user_data, "Should I ban this user?")
print(result)
# Example 2: Analyze security event
event = {"type": "brute_force", "ip": "192.168.1.100"}
result = ai.analyze(event, "What type of attack is this?")
print(result)
# In your admin panel code:
from ai_helper import AdminAI
ai = AdminAI("sk-your-key")
# When viewing user details:
def check_user(user_id):
    user = get_user_from_database(user_id)
    # Get AI recommendation
    ai_advice = ai.analyze(
        data=user,
        question="Analyze this user's activity. Should I ban, monitor, or approve?"
    )
    # Show to admin
    print(f"User: {user['username']}")
    print(f"AI Recommendation: {ai_advice}")
    return ai_advice
# When reviewing security events:
def analyze_security_event(event_id):
    event = get_event_from_database(event_id)
    ai_analysis = ai.analyze(
        data=event,
        question="What kind of attack is this? How severe? What should I do?"
    )
    return ai_analysis
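To surface these recommendations inside the panel's UI rather than the console, wire the helpers into whatever web framework the panel already uses. A minimal sketch assuming a Flask-based panel; the route path and get_user_from_database are placeholders for your own setup, and ai is the AdminAI instance created above.

from flask import Flask, jsonify

app = Flask(__name__)

@app.route("/admin/users/<user_id>/ai-review")
def ai_review(user_id):
    user = get_user_from_database(user_id)  # your existing data-access helper
    advice = ai.analyze(
        data=user,
        question="Should I ban, monitor, or approve this user? Answer briefly."
    )
    # Return the recommendation to the admin UI as JSON
    return jsonify({"user_id": user_id, "ai_recommendation": advice})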