Files
fund-tracer/backend/app/services/assessment_service.py
2026-03-11 16:28:04 +08:00

151 lines
5.7 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
"""Fraud amount assessment and inquiry suggestion generation."""
import json
import logging
from uuid import UUID

import httpx
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

from app.core.config import settings
from app.models.transaction import TransactionRecord
from app.models.assessment import FraudAssessment, ConfidenceLevel, ReviewStatus
from app.rules.assessment_rules import classify_transaction
logger = logging.getLogger(__name__)
async def assess_case(case_id: UUID, db: AsyncSession) -> list[FraudAssessment]:
    """Run the rule engine over every non-duplicate transaction of a case.

    One ``FraudAssessment`` row is created per transaction and staged on the
    session (flushed, not committed).  When an LLM endpoint is configured the
    rule-generated reasons are optionally polished; any LLM failure is logged
    at debug level and the rule-based reason is kept.

    Args:
        case_id: Case whose transactions are assessed.
        db: Active async database session.

    Returns:
        The newly created assessment rows, in transaction-time order.
    """
    stmt = (
        select(TransactionRecord)
        .where(TransactionRecord.case_id == case_id)
        .where(TransactionRecord.is_duplicate.is_(False))
        .order_by(TransactionRecord.trade_time.asc())
    )
    records = list((await db.execute(stmt)).scalars().all())

    created: list[FraudAssessment] = []
    for record in records:
        level, reason, exclude_reason = classify_transaction(record)
        # Low-confidence transactions do not count toward the assessed amount.
        amount = float(record.amount) if level != ConfidenceLevel.low else 0
        assessment = FraudAssessment(
            case_id=case_id,
            transaction_id=record.id,
            confidence_level=level,
            assessed_amount=amount,
            reason=reason,
            exclude_reason=exclude_reason,
            review_status=ReviewStatus.pending,
        )
        db.add(assessment)
        created.append(assessment)
    await db.flush()

    # Best-effort reason polishing via the configured LLM endpoint.
    if settings.LLM_API_KEY and settings.LLM_API_URL:
        for assessment in created:
            try:
                polished = await _enhance_reason_via_llm(assessment, records)
            except Exception as exc:  # keep the rule-based reason on any failure
                logger.debug("LLM reason enhancement skipped: %s", exc)
            else:
                if polished:
                    assessment.reason = polished
        await db.flush()
    return created
async def generate_inquiry_suggestions(case_id: UUID, db: AsyncSession) -> list[str]:
    """Build interview / inquiry suggestions from a case's assessment results.

    Prefers LLM-generated suggestions when an endpoint is configured; on any
    LLM error (or when no endpoint is configured) falls back to the
    deterministic rule-based generator.

    Args:
        case_id: Case whose assessments drive the suggestions.
        db: Active async database session.

    Returns:
        A list of suggestion strings; a single placeholder message when the
        case has no assessments yet.
    """
    stmt = (
        select(FraudAssessment)
        .where(FraudAssessment.case_id == case_id)
        .order_by(FraudAssessment.created_at.asc())
    )
    rows = list((await db.execute(stmt)).scalars().all())
    if not rows:
        return ["暂无分析结果,请先执行案件分析。"]

    if settings.LLM_API_KEY and settings.LLM_API_URL:
        try:
            return await _generate_suggestions_via_llm(rows)
        except Exception as exc:  # fall through to the rule-based generator
            logger.debug("LLM suggestions skipped: %s", exc)
    return _generate_suggestions_rule_based(rows)
def _generate_suggestions_rule_based(assessments: list[FraudAssessment]) -> list[str]:
    """Deterministic fallback: derive inquiry suggestions without an LLM.

    Emits up to two data-driven suggestions (pending reviews, medium
    confidence) followed by three fixed follow-up questions.
    """
    pending_count = sum(
        1 for a in assessments if a.review_status == ReviewStatus.pending
    )
    has_medium = any(
        a.confidence_level == ConfidenceLevel.medium for a in assessments
    )

    out: list[str] = []
    if pending_count:
        out.append(
            f"{pending_count} 笔交易尚未确认,建议逐笔向受害人核实是否受到诱导操作。"
        )
    if has_medium:
        out.append(
            "部分交易置信度为中等,建议追问受害人交易的具体背景和对方的诱导话术。"
        )
    out.extend([
        "是否还有其他未截图的转账记录或 APP 需要补充?",
        "涉案金额中是否有已部分追回或返还的款项?",
        "除了截图所示的 APP 外是否还存在银行柜台、ATM、其他支付平台等转账渠道",
    ])
    return out
async def _enhance_reason_via_llm(fa: FraudAssessment, all_tx: list) -> str | None:
    """Ask the configured LLM to rewrite one assessment reason for case files.

    NOTE(review): ``all_tx`` is currently unused in the body — presumably
    reserved for future context building; confirm before removing (the caller
    in ``assess_case`` passes the full transaction list).

    Raises:
        httpx.HTTPStatusError: On a non-2xx response from the endpoint.
    """
    prompt = (
        f"这笔交易金额{fa.assessed_amount}元,置信等级{fa.confidence_level.value}"
        f"原始认定理由:{fa.reason}"
        "请用简洁中文优化认定理由表述,使之适合出现在办案文书中。只返回优化后的理由文字。"
    )
    headers = {"Authorization": f"Bearer {settings.LLM_API_KEY}"}
    payload = {
        "model": settings.LLM_MODEL,
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 300,
    }
    async with httpx.AsyncClient(timeout=15) as client:
        response = await client.post(settings.LLM_API_URL, headers=headers, json=payload)
        response.raise_for_status()
        body = response.json()
    return body["choices"][0]["message"]["content"].strip()
async def _generate_suggestions_via_llm(assessments: list[FraudAssessment]) -> list[str]:
    """Generate inquiry suggestions for a case via the configured LLM.

    A one-line summary per assessment is sent to the chat-completions
    endpoint, asking the model for a JSON array of five strings.  The reply
    is fence-stripped, parsed, and validated so the declared ``list[str]``
    return type actually holds (the original code returned whatever
    ``json.loads`` produced, so a dict/string reply leaked through silently).

    Raises:
        httpx.HTTPStatusError: On a non-2xx response from the endpoint.
        json.JSONDecodeError: If the reply is not valid JSON.
        ValueError: If the reply parses but is not a JSON array.
        (Callers treat any exception as "fall back to rule-based output".)
    """
    summary = "\n".join(
        f"- 金额{a.assessed_amount}元, 置信{a.confidence_level.value}, "
        f"状态{a.review_status.value}, 理由: {a.reason[:60]}"
        for a in assessments
    )
    prompt = (
        "你是一名反诈案件办案助手。以下是某诈骗案件的交易认定摘要:\n"
        f"{summary}\n\n"
        "请生成5条笔录辅助问询建议帮助民警追问受害人以完善证据链。"
        "只返回JSON数组格式的5个字符串。"
    )
    async with httpx.AsyncClient(timeout=20) as client:
        resp = await client.post(
            settings.LLM_API_URL,
            headers={"Authorization": f"Bearer {settings.LLM_API_KEY}"},
            json={
                "model": settings.LLM_MODEL,
                "messages": [{"role": "user", "content": prompt}],
                "max_tokens": 600,
            },
        )
        resp.raise_for_status()
        text = resp.json()["choices"][0]["message"]["content"]

    parsed = json.loads(_strip_code_fence(text))
    if not isinstance(parsed, list):
        # Guard the declared return type: dict/str replies must not leak out.
        raise ValueError("LLM reply is not a JSON array")
    return [str(item) for item in parsed]


def _strip_code_fence(text: str) -> str:
    """Strip an optional Markdown code fence (and ``json`` tag) around *text*.

    Handles replies such as ``\u0060\u0060\u0060json\n[...]\n\u0060\u0060\u0060``; a bare JSON
    reply passes through unchanged.  Unlike the previous inline version, the
    ``json`` language tag is only removed when a fence was actually present.
    """
    cleaned = text.strip()
    if cleaned.startswith("`"):
        cleaned = cleaned.strip("`").strip()
        # The language tag may ride on the opening fence ("json" / "JSON").
        if cleaned[:4].lower() == "json":
            cleaned = cleaned[4:]
    return cleaned.strip()