-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathagents.py
More file actions
323 lines (274 loc) · 8.47 KB
/
agents.py
File metadata and controls
323 lines (274 loc) · 8.47 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
import os
import json
from typing import Dict, List
from openai import OpenAI
# Project branding
PROJECT_NAME = "SafeScroll"
PROJECT_SLUG = "safescroll"
# OpenAI client (expects OPENAI_API_KEY env variable)
# Keep the placeholder; user must set the env var in production.
# NOTE(review): with this placeholder fallback, a missing env var is not
# detected at import time — it only surfaces as an auth error on the first
# API request, inside _run_json_agent's try/except.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", "YOUR_OPENAI_API_KEY_HERE"))
# Default model used for agents (change if you'd like)
LLM_MODEL = "gpt-4o-mini"
def _run_json_agent(system_prompt: str, user_content: str, max_tokens: int = 400) -> Dict:
"""
Helper to call an LLM agent and parse JSON response.
Returns an empty dict on parse failure.
"""
try:
resp = client.chat.completions.create(
model=LLM_MODEL,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_content},
],
response_format={"type": "json_object"},
max_tokens=max_tokens,
)
except Exception as e:
# Return a sensible error structure so callers can handle missing output
return {"error": f"LLM request failed: {e}"}
# Extract content in a defensive way
try:
content = resp.choices[0].message.content
except Exception:
# Unexpected response shape
return {"error": "unexpected LLM response format"}
# Try to parse JSON strictly, fall back to heuristic extraction
try:
return json.loads(content)
except Exception:
# attempt to recover JSON substring
try:
idx = content.rfind("{")
if idx != -1:
return json.loads(content[idx:])
except Exception:
# best-effort: return raw content for debugging
return {"error": "failed to parse JSON", "raw": content}
def underage_risk_agent(user_profile: Dict, posts: List[Dict]) -> Dict:
    """
    Estimate underage risk from a declared profile age plus writing style.

    Parameters:
        user_profile: Dict such as {"user_id": "...", "age": int, ...}.
        posts: List of dicts such as {"post_id": "...", "text": "...", ...}.

    Returns:
        Parsed agent JSON shaped like:
        {
            "is_minor_suspected": true,
            "underage_misrepresentation_risk": 0,
            "reason": ""
        }
    """
    system_prompt = f"""
You are an Underage Risk Detection Agent for {PROJECT_NAME}.
You receive:
- A user profile (age, account_type, created_at)
- A sample of their posts
Task:
1. Estimate if the user appears UNDER 18 or ADULT based on content style and declared age.
2. Output a risk score for "underage_misrepresentation" from 0 to 100.
3. Provide reasoning.
Output strict JSON:
{{
"is_minor_suspected": true,
"underage_misrepresentation_risk": 0,
"reason": ""
}}
"""
    profile_json = json.dumps(user_profile, ensure_ascii=False)
    posts_json = json.dumps(posts, ensure_ascii=False)
    prompt_input = "USER_PROFILE:\n" + profile_json + "\n\nSAMPLE_POSTS:\n" + posts_json
    return _run_json_agent(system_prompt, prompt_input)
def content_risk_agent(posts: List[Dict]) -> Dict:
    """
    Scan a user's posts for bullying, self-harm, sexual exploitation,
    and substance-abuse signals, per post and in aggregate.

    Parameters:
        posts: List of dicts such as {"post_id": "...", "text": "...", ...}.

    Returns:
        Parsed agent JSON shaped like:
        {
            "per_post": [
                {
                    "post_id": "",
                    "text": "",
                    "bullying_risk": "none",
                    "self_harm_risk": "none",
                    "sexual_exploitation_risk": "none",
                    "substance_abuse_risk": "none",
                    "notes": ""
                }
            ],
            "overall": {
                "bullying_risk": "none",
                "self_harm_risk": "none",
                "sexual_exploitation_risk": "none",
                "substance_abuse_risk": "none",
                "summary": ""
            }
        }
    """
    system_prompt = f"""
You are a Content Safety Agent working for {PROJECT_NAME}.
You receive a list of posts from one user. For each post, you should detect:
- bullying
- self_harm
- sexual_exploitation_or_grooming
- substance_abuse
Then aggregate into overall risk levels.
Overall risk levels should be "none", "low", "medium", or "high".
Output this strict JSON:
{{
"per_post": [
{{
"post_id": "",
"text": "",
"bullying_risk": "none",
"self_harm_risk": "none",
"sexual_exploitation_risk": "none",
"substance_abuse_risk": "none",
"notes": ""
}}
],
"overall": {{
"bullying_risk": "none",
"self_harm_risk": "none",
"sexual_exploitation_risk": "none",
"substance_abuse_risk": "none",
"summary": ""
}}
}}
"""
    serialized_posts = json.dumps(posts, ensure_ascii=False)
    return _run_json_agent(system_prompt, serialized_posts, max_tokens=1000)
def interaction_risk_agent(user_profile: Dict, interactions: List[Dict]) -> Dict:
    """
    Inspect direct-message interactions for grooming-like patterns and
    age/power imbalance involving the audited user.

    Parameters:
        user_profile: Profile dict for the audited user.
        interactions: List of dicts such as
            {"interaction_id", "from_user", "to_user", "text",
             "from_age", "to_age", ...}.

    Returns:
        Parsed agent JSON shaped like:
        {
            "grooming_risk": "none",
            "evidence": [
                {"interaction_id": "", "text_snippet": "", "comment": ""}
            ],
            "summary": ""
        }
    """
    system_prompt = f"""
You are an Interaction Risk Agent for {PROJECT_NAME}.
You receive:
- A user profile
- A list of direct message interactions between this user and others.
Each interaction includes from_user, to_user, text, and age information (in metadata).
Task:
1. Detect if there are signs of grooming or sexual exploitation risk.
2. Consider age differences (older messaging younger).
3. Output a "grooming_risk" level: "none", "low", "medium", "high", "critical".
4. Provide key evidence snippets.
Output strict JSON:
{{
"grooming_risk": "none",
"evidence": [
{{
"interaction_id": "",
"text_snippet": "",
"comment": ""
}}
],
"summary": ""
}}
"""
    request_payload = {"user_profile": user_profile, "interactions": interactions}
    serialized = json.dumps(request_payload, ensure_ascii=False)
    return _run_json_agent(system_prompt, serialized, max_tokens=800)
def policy_violation_agent(policy_text: str, aggregated_findings: Dict) -> Dict:
    """
    Map findings from the other safety agents onto company policy sections.

    Parameters:
        policy_text: Raw company safety policies as a single string.
        aggregated_findings: Combined outputs from the other agents.

    Returns:
        Parsed agent JSON shaped like:
        {
            "violated_sections": [],
            "overall_severity": "low",
            "recommended_action": "",
            "explanation": ""
        }
    """
    system_prompt = f"""
You are a Policy Violation Agent for {PROJECT_NAME}.
You receive:
- Company safety policies text.
- Aggregated findings from other safety agents about a single user.
Task:
1. Map the findings to specific policy sections that are likely violated.
2. Determine overall severity: "low", "medium", "high", "critical".
3. Recommend an action:
- "monitor"
- "warn"
- "restrict_features"
- "escalate_to_safety_team"
- "temporary_suspension"
4. Provide a short explanation.
Output strict JSON:
{{
"violated_sections": [],
"overall_severity": "low",
"recommended_action": "",
"explanation": ""
}}
"""
    request_payload = {"policies": policy_text, "findings": aggregated_findings}
    serialized = json.dumps(request_payload, ensure_ascii=False)
    return _run_json_agent(system_prompt, serialized, max_tokens=600)
def report_generator_agent(
    user_profile: Dict,
    underage: Dict,
    content: Dict,
    interactions: Dict,
    policy_result: Dict,
) -> Dict:
    """
    Produce a human-readable (markdown-style) safety report from the
    structured outputs of the other agents.

    Parameters:
        user_profile: Profile dict for the audited user.
        underage: Output of underage_risk_agent.
        content: Output of content_risk_agent.
        interactions: Output of interaction_risk_agent.
        policy_result: Output of policy_violation_agent.

    Returns:
        Parsed agent JSON shaped like:
        {
            "risk_title": "",
            "overall_risk_score": 0,
            "risk_summary": "",
            "markdown_report": ""
        }
    """
    system_prompt = f"""
You are a Safety Report Generator Agent for {PROJECT_NAME}.
You receive structured JSON from several safety agents for ONE user:
- underage risk
- content risk
- interaction/grooming risk
- policy violation summary
Task:
1. Produce a clear, human-readable report (markdown-style).
2. Include:
- short user summary
- key risks
- evidence examples
- final recommended action
Output strict JSON:
{{
"risk_title": "",
"overall_risk_score": 0,
"risk_summary": "",
"markdown_report": ""
}}
"""
    # Key order matters for the serialized payload; keep it stable.
    request_payload = {
        "user_profile": user_profile,
        "underage": underage,
        "content": content,
        "interactions": interactions,
        "policy_result": policy_result,
    }
    serialized = json.dumps(request_payload, ensure_ascii=False)
    return _run_json_agent(system_prompt, serialized, max_tokens=900)