feat: CONAI Phase 1 MVP 초기 구현

소형 건설업체(100억 미만)를 위한 AI 기반 토목공사 통합관리 플랫폼

Backend (FastAPI):
- SQLAlchemy 모델 13개 (users, projects, wbs, tasks, daily_reports, reports, inspections, quality, weather, permits, rag, settings)
- API 라우터 11개 (auth, projects, tasks, daily_reports, reports, inspections, weather, rag, kakao, permits, settings)
- Services: Claude AI 래퍼, CPM Gantt 계산, 기상청 API, RAG(pgvector), 카카오 Skill API
- Alembic 마이그레이션 (pgvector 포함)
- pytest 테스트 (CPM, 날씨 경보)

Frontend (Next.js 15):
- 11개 페이지 (대시보드, 프로젝트, Gantt, 일보, 검측, 품질, 날씨, 인허가, RAG, 설정)
- TanStack Query + Zustand + Tailwind CSS

인프라:
- Docker Compose (PostgreSQL pgvector + backend + frontend)
- 한국어 README 및 설치 가이드

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
sinmb79
2026-03-24 20:06:36 +09:00
commit 2a4950d8a0
99 changed files with 7447 additions and 0 deletions
View File
+51
View File
@@ -0,0 +1,51 @@
"""
Core Claude API wrapper.
Shared by all AI-powered features: daily reports, inspection gen, report gen, RAG.
"""
import anthropic
from app.config import settings
# Module-level singleton so every feature shares one HTTP connection pool.
_client: anthropic.AsyncAnthropic | None = None


def get_client() -> anthropic.AsyncAnthropic:
    """Return the shared AsyncAnthropic client, creating it lazily on first use."""
    global _client
    if _client is not None:
        return _client
    _client = anthropic.AsyncAnthropic(api_key=settings.ANTHROPIC_API_KEY)
    return _client
async def complete(
    messages: list[dict],
    system: str,
    temperature: float = 0.3,
    max_tokens: int | None = None,
) -> str:
    """
    Call Claude and return the text of the first response content block.

    Args:
        messages: Chat messages in the Anthropic ``messages`` format.
        system: System prompt for this request.
        temperature: Sampling temperature (default 0.3 for factual output).
        max_tokens: Per-call override; falls back to ``settings.CLAUDE_MAX_TOKENS``.

    Returns:
        The model's response text.
    """
    # Local import keeps this module's top-level imports unchanged.
    import logging

    client = get_client()
    response = await client.messages.create(
        model=settings.CLAUDE_MODEL,
        max_tokens=max_tokens or settings.CLAUDE_MAX_TOKENS,
        temperature=temperature,
        system=system,
        messages=messages,
    )
    # Idiom fix: route token-usage cost monitoring through logging instead of
    # print(), so it works under any logging configuration (and can be
    # filtered/shipped) rather than depending on captured stdout.
    usage = response.usage
    logging.getLogger(__name__).info(
        "[AI] input=%s output=%s total=%s",
        usage.input_tokens,
        usage.output_tokens,
        usage.input_tokens + usage.output_tokens,
    )
    return response.content[0].text
async def complete_json(
    messages: list[dict],
    system: str,
    temperature: float = 0.3,
) -> str:
    """Call Claude with a JSON-only output instruction appended to the system prompt."""
    json_instruction = "\n\n반드시 유효한 JSON 형식으로만 응답하세요. 다른 텍스트를 포함하지 마세요."
    return await complete(messages, system + json_instruction, temperature)
+41
View File
@@ -0,0 +1,41 @@
"""AI-powered daily report generation."""
from app.services.ai_engine import complete
from app.services.prompts.daily_report import SYSTEM_PROMPT, build_prompt
async def generate_work_content(
    project_name: str,
    report_date: str,
    weather_summary: str,
    temperature_high: float | None,
    temperature_low: float | None,
    workers_count: dict,
    equipment_list: list,
    work_items: list[str],
    issues: str | None,
) -> str:
    """Generate the work-content text of a daily report via Claude.

    Args:
        project_name: Project display name.
        report_date: Report date string.
        weather_summary: Short weather description (defaults to "맑음" when falsy).
        temperature_high / temperature_low: Daily extremes in °C, if known.
        workers_count: Mapping of trade -> head count.
        equipment_list: Equipment entries for the day.
        work_items: Work items performed.
        issues: Free-text issues, if any.

    Returns:
        The AI-generated work-content text.
    """
    # Build a readable temperature summary.  Bug fix: the original fell
    # through to "기온 정보 없음" when only temperature_low was available,
    # silently discarding the known low value.
    if temperature_high is not None and temperature_low is not None:
        temp_str = f"최고 {temperature_high}°C / 최저 {temperature_low}°C"
    elif temperature_high is not None:
        temp_str = f"최고 {temperature_high}°C"
    elif temperature_low is not None:
        temp_str = f"최저 {temperature_low}°C"
    else:
        temp_str = "기온 정보 없음"
    prompt = build_prompt(
        project_name=project_name,
        report_date=report_date,
        weather_summary=weather_summary or "맑음",
        temperature=temp_str,
        workers=workers_count or {},
        equipment=equipment_list or [],
        work_items=work_items,
        issues=issues,
    )
    return await complete(
        messages=[{"role": "user", "content": prompt}],
        system=SYSTEM_PROMPT,
        temperature=0.3,
    )
+111
View File
@@ -0,0 +1,111 @@
"""
CPM (Critical Path Method) calculation for Gantt chart.
"""
from datetime import date, timedelta
from typing import NamedTuple
import uuid
class TaskNode(NamedTuple):
    """Immutable record describing the task shape consumed by compute_cpm.

    NOTE(review): compute_cpm accesses tasks via duck typing (``id``,
    ``planned_start``, ``planned_end``); this tuple documents that contract.
    """
    # Unique task identifier; dependencies reference it via
    # predecessor_id / successor_id.
    id: uuid.UUID
    # Planned calendar window; compute_cpm assumes a 1-day duration when
    # either endpoint is missing.
    planned_start: date | None
    planned_end: date | None
    # Duration in days.  compute_cpm derives duration from the dates above,
    # so this field is informational only.
    duration_days: int
def compute_cpm(tasks: list, dependencies: list) -> tuple[dict[uuid.UUID, dict], int]:
    """
    Compute the CPM (Critical Path Method) forward/backward passes.

    Args:
        tasks: objects exposing ``id``, ``planned_start`` and ``planned_end``.
        dependencies: objects exposing ``predecessor_id`` and ``successor_id``.
            Cycles are not supported; tasks inside a cycle are omitted.

    Returns:
        ``(schedule, project_duration)`` where ``schedule`` maps
        task_id -> {early_start, early_finish, late_start, late_finish,
        total_float, is_critical} (dates / ints) and ``project_duration``
        is the total length in days.

    Bug fixes vs. the original:
    - the return annotation claimed ``dict`` although a tuple is returned;
    - the empty-input early returns produced a bare ``{}``, crashing callers
      that unpack the ``(schedule, duration)`` tuple — they now return
      ``({}, 0)``.
    """
    if not tasks:
        return {}, 0
    task_map = {t.id: t for t in tasks}
    successors: dict[uuid.UUID, list[uuid.UUID]] = {t.id: [] for t in tasks}
    predecessors: dict[uuid.UUID, list[uuid.UUID]] = {t.id: [] for t in tasks}
    for dep in dependencies:
        successors[dep.predecessor_id].append(dep.successor_id)
        predecessors[dep.successor_id].append(dep.predecessor_id)

    def get_duration(task) -> int:
        # Inclusive day count from the planned window; 1 day when unknown.
        if task.planned_start and task.planned_end:
            return max(1, (task.planned_end - task.planned_start).days + 1)
        return 1

    # Topological sort (Kahn's algorithm).  The list doubles as the output
    # order; advancing an index avoids the O(n) cost of list.pop(0).
    in_degree = {t.id: len(predecessors[t.id]) for t in tasks}
    topo_order = [t.id for t in tasks if in_degree[t.id] == 0]
    i = 0
    while i < len(topo_order):
        node = topo_order[i]
        i += 1
        for succ in successors[node]:
            in_degree[succ] -= 1
            if in_degree[succ] == 0:
                topo_order.append(succ)
    # Forward pass: Early Start / Early Finish in days from project start.
    es: dict[uuid.UUID, int] = {}
    ef: dict[uuid.UUID, int] = {}
    for tid in topo_order:
        dur = get_duration(task_map[tid])
        es[tid] = max((ef[p] for p in predecessors[tid]), default=0)
        ef[tid] = es[tid] + dur
    if not ef:
        # All tasks were in a dependency cycle; nothing scheduleable.
        return {}, 0
    project_duration = max(ef.values())
    # Backward pass: Late Finish / Late Start.
    lf: dict[uuid.UUID, int] = {}
    ls: dict[uuid.UUID, int] = {}
    for tid in reversed(topo_order):
        dur = get_duration(task_map[tid])
        lf[tid] = min((ls[s] for s in successors[tid]), default=project_duration)
        ls[tid] = lf[tid] - dur
    # Anchor offsets at the earliest planned start (today as a fallback).
    project_start = min(
        (t.planned_start for t in tasks if t.planned_start), default=None
    ) or date.today()
    result: dict[uuid.UUID, dict] = {}
    for tid in topo_order:
        total_float = ls[tid] - es[tid]
        result[tid] = {
            "early_start": project_start + timedelta(days=es[tid]),
            "early_finish": project_start + timedelta(days=ef[tid] - 1),
            "late_start": project_start + timedelta(days=ls[tid]),
            "late_finish": project_start + timedelta(days=lf[tid] - 1),
            "total_float": total_float,
            "is_critical": total_float == 0,
        }
    return result, project_duration
+34
View File
@@ -0,0 +1,34 @@
"""AI-powered inspection request generation."""
import json
from app.services.ai_engine import complete_json
from app.services.prompts.inspection import SYSTEM_PROMPT, build_prompt
async def generate_checklist(
    project_name: str,
    inspection_type: str,
    location_detail: str | None,
    requested_date: str,
    wbs_name: str | None,
) -> list[dict]:
    """Generate inspection checklist items using Claude.

    Returns:
        The parsed ``checklist_items`` list, or ``[]`` when the reply is not
        valid JSON or not shaped as expected.

    Robustness fix: the original only caught JSONDecodeError/KeyError, so a
    reply that was valid JSON but not an object (e.g. a bare list) raised an
    uncaught AttributeError on ``.get``; the KeyError clause was dead since
    ``dict.get`` never raises it.  The shape is now validated explicitly.
    """
    prompt = build_prompt(
        project_name=project_name,
        inspection_type=inspection_type,
        location_detail=location_detail,
        requested_date=requested_date,
        wbs_name=wbs_name,
    )
    raw = await complete_json(
        messages=[{"role": "user", "content": prompt}],
        system=SYSTEM_PROMPT,
        temperature=0.2,
    )
    try:
        data = json.loads(raw)
    except json.JSONDecodeError:
        # Fallback: the model ignored the JSON-only instruction.
        return []
    if not isinstance(data, dict):
        return []
    items = data.get("checklist_items", [])
    return items if isinstance(items, list) else []
+123
View File
@@ -0,0 +1,123 @@
"""
Kakao Chatbot Skill API service.
Parses incoming messages and routes to appropriate handlers.
"""
import re
from datetime import date
# Kakao Skill response builders
def simple_text(text: str) -> dict:
    """Wrap *text* in a Kakao Skill v2.0 simpleText response payload."""
    output = {"simpleText": {"text": text}}
    return {"version": "2.0", "template": {"outputs": [output]}}
def basic_card(title: str, description: str, buttons: list[dict] | None = None) -> dict:
card = {"title": title, "description": description}
if buttons:
card["buttons"] = buttons
return {
"version": "2.0",
"template": {
"outputs": [{"basicCard": card}]
}
}
def list_card(header_title: str, items: list[dict], buttons: list[dict] | None = None) -> dict:
card = {
"header": {"title": header_title},
"items": items,
}
if buttons:
card["buttons"] = buttons
return {
"version": "2.0",
"template": {
"outputs": [{"listCard": card}]
}
}
# Message routing
class KakaoIntent:
    """String constants naming the chatbot intents this service routes to."""

    DAILY_REPORT = "daily_report"
    RAG_QUESTION = "rag_question"
    WEATHER = "weather"
    HELP = "help"
    UNKNOWN = "unknown"


def detect_intent(utterance: str) -> str:
    """Detect the user's intent from a Kakao utterance via keyword matching.

    Bug fix: the original keyword lists contained empty strings (most likely
    mojibake from characters lost in transit — TODO recover the intended
    keywords).  Because ``"" in s`` is True for every string, any utterance
    without a daily-report keyword was classified as RAG_QUESTION and the
    weather/help branches were unreachable.  The empty entries are removed.
    """
    u = utterance.strip()
    # Daily-report keywords take priority over everything else.
    if any(k in u for k in ["일보", "작업일보", "오늘 공사", "금일 공사"]):
        return KakaoIntent.DAILY_REPORT
    # RAG / regulation-question keywords.
    if any(k in u for k in ["질문", "법규", "시방서", "기준", "KCS", "법령", "산안법", "중대재해", "?"]):
        return KakaoIntent.RAG_QUESTION
    # Weather keywords.
    if any(k in u for k in ["날씨", "기상", "바람"]):
        return KakaoIntent.WEATHER
    # Help / menu keywords.
    if any(k in u for k in ["도움말", "메뉴", "help", "사용법"]):
        return KakaoIntent.HELP
    return KakaoIntent.UNKNOWN
def parse_daily_report_input(utterance: str) -> dict:
    """
    Parse a free-form Kakao message into daily-report fields.

    Example: "오늘 일보: 콘크리트 5명, 철근 3명, 관로매설 오후 완료"
    """
    # Worker head counts: every "<직종> <N>명" pair in the message.
    workers = {
        role: int(count)
        for role, count in re.findall(r'([가-힣a-zA-Z]+)\s+(\d+)명', utterance)
        if role not in ("", "합계")
    }
    # Work items: remaining non-trivial lines that carry no head-count pattern.
    body = utterance.replace("일보:", "").replace("작업일보:", "")
    work_items = []
    for raw_line in body.split("\n"):
        cleaned = raw_line.strip().lstrip("-").strip()
        if cleaned and len(cleaned) > 2 and not re.search(r'\d+명', cleaned):
            work_items.append(cleaned)
    # Optional issues sentence, introduced by 특이/문제/이슈.
    issues = None
    if any(marker in utterance for marker in ("특이", "문제", "이슈")):
        matched = re.search(r'(특이|문제|이슈)[사항:\s]*(.+?)(?:\n|$)', utterance)
        if matched:
            issues = matched.group(2).strip()
    return {
        "workers_count": workers,
        "work_items": work_items or ["기타 작업"],
        "issues": issues,
        "report_date": str(date.today()),
    }
def make_help_response() -> dict:
    """Build the help-menu list card returned for the HELP intent."""
    menu_items = [
        {"title": "작업일보 작성", "description": "일보: 작업내용 입력"},
        {"title": "법규 질문", "description": "질문: 궁금한 내용 입력"},
        {"title": "날씨 확인", "description": "날씨 입력"},
    ]
    menu_buttons = [{"action": "message", "label": "일보 작성", "messageText": "일보:"}]
    return list_card(
        header_title="CONAI 현장 도우미",
        items=menu_items,
        buttons=menu_buttons,
    )
@@ -0,0 +1,52 @@
# System prompt for AI daily-report ("작업일보") generation.  Runtime string:
# content is deliberately Korean and must not be translated or reformatted.
SYSTEM_PROMPT = """당신은 대한민국 토목건설 현장의 작업일보 작성 전문가입니다.
현장소장이 제공하는 정보를 바탕으로 공식적인 작업일보를 작성합니다.
작업일보 작성 원칙:
1. 건설기술진흥법 시행규칙에 따른 서식 기준을 준수합니다
2. 객관적이고 사실에 근거한 내용만 기록합니다
3. 전문 건설 용어를 사용하되, 명확하고 이해하기 쉽게 작성합니다
4. 날씨, 인원, 장비, 작업내용을 구조적으로 기술합니다
5. 특이사항이 있으면 간결하게 기록합니다
응답 형식:
- 작업내용은 공종별로 구분하여 기술
- 각 항목은 간결하고 명확하게
- 존칭이나 과도한 수식어 사용 금지
"""
def build_prompt(
project_name: str,
report_date: str,
weather_summary: str,
temperature: str,
workers: dict,
equipment: list,
work_items: list[str],
issues: str | None,
) -> str:
workers_text = ", ".join([f"{k} {v}" for k, v in workers.items()])
equipment_text = ", ".join([f"{e.get('type', '')} {e.get('count', 1)}" for e in equipment])
work_text = "\n".join([f"- {item}" for item in work_items])
prompt = f"""다음 정보를 바탕으로 작업일보의 '작업내용' 항목을 작성해주세요.
[현장 정보]
- 공사명: {project_name}
- 작업일자: {report_date}
- 날씨: {weather_summary}, 기온 {temperature}
[투입 인원]
{workers_text}
[투입 장비]
{equipment_text if equipment_text else "장비 없음"}
[당일 작업 항목]
{work_text}
[특이사항]
{issues if issues else "특이사항 없음"}
위 정보를 기반으로 공식 작업일보의 '금일 작업내용' 항목을 200~400자로 작성해주세요.
공종별로 나누어 구체적이고 전문적으로 기술하세요."""
return prompt
@@ -0,0 +1,47 @@
# System prompt for AI inspection-request ("검측요청서") checklist generation.
# Runtime string: content is deliberately Korean and must not be translated.
SYSTEM_PROMPT = """당신은 대한민국 토목건설 현장의 품질관리 전문가입니다.
KCS(한국건설기준) 시방서와 건설기술진흥법에 따라 검측요청서를 작성합니다.
검측요청서 작성 원칙:
1. KCS 시방서 기준에 맞는 체크리스트 항목을 포함합니다
2. 각 항목은 명확하고 측정 가능해야 합니다
3. 시공 전/시공 중/시공 후 점검 시점을 구분합니다
4. 허용 기준값이 있는 항목은 수치를 명시합니다
공종별 주요 체크리스트:
- 철근공사: 배근 간격, 피복두께, 이음 위치, 가스압접 등
- 거푸집공사: 치수, 수직도, 지지대 안전, 청소 상태 등
- 콘크리트타설: 슬럼프, 공기량, 타설 방법, 양생 계획 등
- 관로매설: 관저고, 관경, 구배, 접합 상태, 토피 등
- 성토/다짐: 두께, 다짐도, 함수비 등
- 도로포장: 두께, 배합, 평탄성, 표면상태 등
"""
def build_prompt(
project_name: str,
inspection_type: str,
location_detail: str,
requested_date: str,
wbs_name: str | None,
) -> str:
return f"""다음 정보를 바탕으로 검측요청서의 점검 항목 목록을 생성해주세요.
[검측 정보]
- 공사명: {project_name}
- 공종: {inspection_type}
- 위치: {location_detail or "미지정"}
- 관련 WBS: {wbs_name or "미지정"}
- 검측 요청일: {requested_date}
다음 JSON 형식으로 체크리스트 항목을 10개 이내로 작성하세요:
{{
"checklist_items": [
{{
"item": "점검항목명",
"standard": "기준값 또는 기준 내용",
"timing": "시공전|시공중|시공후",
"passed": null
}}
]
}}
KCS 시방서 기준에 맞는 구체적인 항목으로 작성하세요."""
+31
View File
@@ -0,0 +1,31 @@
# System prompt for the RAG question-answering pipeline: forces answers to be
# grounded in the supplied context chunks.  Runtime string: content is
# deliberately Korean and must not be translated.
SYSTEM_PROMPT = """당신은 대한민국 건설 법규 및 KCS(한국건설기준) 시방서 전문 어시스턴트입니다.
반드시 제공된 참고 자료(Context)에서 근거를 찾아 답변해야 합니다.
답변 원칙:
1. 제공된 Context에서만 근거를 찾아 답변합니다
2. Context에 해당 정보가 없으면 "제공된 자료에서 해당 정보를 찾을 수 없습니다"라고 명시합니다
3. 법령 조항 번호, KCS 코드 등 출처를 명확히 인용합니다
4. 이 답변은 참고용이며 법률 자문이 아님을 명심하세요
5. 안전과 관련된 사항은 반드시 전문가 확인을 권고합니다
금지 사항:
- Context에 없는 내용을 임의로 추가하는 것
- 법적 판단이나 책임 소재 결정
- 개인 의견 제시
"""
def build_prompt(question: str, context_chunks: list[dict]) -> str:
    """Assemble the grounded-QA prompt from the retrieved context chunks."""
    sections = []
    for chunk in context_chunks:
        title = chunk.get('title', '알 수 없음')
        source_type = chunk.get('source_type', '')
        body = chunk.get('content', '')
        sections.append(f"[출처: {title} | {source_type}]\n{body}")
    context_text = "\n\n---\n\n".join(sections)
    return f"""다음 참고 자료를 바탕으로 질문에 답변해주세요.
[참고 자료]
{context_text}
[질문]
{question}
위 참고 자료에 근거하여 답변해주세요. 출처를 명확히 인용하고, 자료에서 찾을 수 없는 내용은 그렇다고 명시하세요."""
+81
View File
@@ -0,0 +1,81 @@
# System prompt for weekly progress-report generation (official document
# submitted to the client).  Runtime string: must not be translated.
WEEKLY_SYSTEM_PROMPT = """당신은 대한민국 토목건설 현장의 공사관리 전문가입니다.
주간 공정보고서를 작성합니다. 발주처에 제출하는 공식 문서입니다.
작성 원칙:
1. 객관적 데이터를 기반으로 작성합니다
2. 계획 대비 실적을 명확히 비교합니다
3. 다음 주 예정 공사를 구체적으로 기술합니다
4. 문제점과 대책을 포함합니다
5. 전문적이고 간결한 문체를 사용합니다
"""
# System prompt for monthly progress-report generation (official document
# submitted to the client).  Runtime string: must not be translated.
MONTHLY_SYSTEM_PROMPT = """당신은 대한민국 토목건설 현장의 공사관리 전문가입니다.
월간 공정보고서를 작성합니다. 발주처에 제출하는 공식 문서입니다.
작성 원칙:
1. 당월 주요 공사 실적을 종합합니다
2. 공정률 현황과 기성 현황을 포함합니다
3. 주요 문제점과 해결 내용을 기술합니다
4. 익월 공사 계획을 수립합니다
5. 공사 품질/안전 현황을 포함합니다
"""
def build_weekly_prompt(
    project_name: str,
    period_start: str,
    period_end: str,
    daily_summaries: list[dict],
    overall_progress_pct: float,
    weather_issues: list[str],
) -> str:
    """Render the weekly progress-report prompt from daily summaries and stats."""
    summary_lines = [
        f"- {entry.get('date', '')}: {entry.get('work_content', '')[:100]}"
        for entry in daily_summaries
    ]
    summaries_text = "\n".join(summary_lines) or "작업일보 없음"
    weather_text = "\n".join(weather_issues) if weather_issues else "날씨 특이사항 없음"
    return f"""다음 정보를 바탕으로 주간 공정보고서 '금주 공사현황' 섹션을 작성해주세요.
[보고 기간]
- 공사명: {project_name}
- 기간: {period_start} ~ {period_end}
[일별 작업 현황]
{summaries_text}
[공정 현황]
- 전체 공정률: {overall_progress_pct:.1f}%
[날씨 영향]
{weather_text}
주간 공정보고서 형식으로 400~600자 분량으로 작성해주세요:
1. 금주 주요 공사 내용
2. 공정 현황 (계획 대비 실적)
3. 특이사항 및 대책
4. 차주 예정 공사"""
def build_monthly_prompt(
    project_name: str,
    period_start: str,
    period_end: str,
    weekly_summaries: list[str],
    overall_progress_pct: float,
) -> str:
    """Render the monthly progress-report prompt from weekly summary lines."""
    weekly_text = "\n".join(weekly_summaries) if weekly_summaries else "주간 현황 없음"
    return f"""다음 정보를 바탕으로 월간 공정보고서를 작성해주세요.
[보고 기간]
- 공사명: {project_name}
- 기간: {period_start} ~ {period_end}
- 전체 공정률: {overall_progress_pct:.1f}%
[주간별 현황 요약]
{weekly_text}
월간 공정보고서 형식으로 600~800자 분량으로 작성해주세요:
1. 당월 공사 개요
2. 공정 현황 (계획 대비 실적, 공정률)
3. 주요 시공 내용
4. 품질/안전 현황
5. 문제점 및 대책
6. 익월 공사 계획"""
+125
View File
@@ -0,0 +1,125 @@
"""
RAG (Retrieval-Augmented Generation) service.
Embeds questions, retrieves relevant chunks, and generates answers with Claude.
"""
import httpx
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, text
from app.config import settings
from app.models.rag import RagChunk, RagSource
from app.services.ai_engine import complete
from app.services.prompts.rag import SYSTEM_PROMPT, build_prompt
async def embed_text(text_input: str) -> list[float]:
    """Embed *text_input*, preferring Voyage AI and falling back to OpenAI.

    Raises:
        ValueError: when neither embedding API key is configured.
    """
    if settings.VOYAGE_API_KEY:
        return await _embed_voyage(text_input)
    if settings.OPENAI_API_KEY:
        return await _embed_openai(text_input)
    raise ValueError("임베딩 API 키가 설정되지 않았습니다 (VOYAGE_API_KEY 또는 OPENAI_API_KEY)")
async def _embed_voyage(text_input: str) -> list[float]:
    """POST to the Voyage AI embeddings endpoint and return the vector."""
    headers = {"Authorization": f"Bearer {settings.VOYAGE_API_KEY}"}
    payload = {"model": settings.EMBEDDING_MODEL, "input": text_input}
    async with httpx.AsyncClient(timeout=30.0) as client:
        response = await client.post(
            "https://api.voyageai.com/v1/embeddings",
            headers=headers,
            json=payload,
        )
        response.raise_for_status()
        return response.json()["data"][0]["embedding"]
async def _embed_openai(text_input: str) -> list[float]:
    """POST to the OpenAI embeddings endpoint and return the vector."""
    headers = {"Authorization": f"Bearer {settings.OPENAI_API_KEY}"}
    payload = {"model": "text-embedding-3-small", "input": text_input}
    async with httpx.AsyncClient(timeout=30.0) as client:
        response = await client.post(
            "https://api.openai.com/v1/embeddings",
            headers=headers,
            json=payload,
        )
        response.raise_for_status()
        return response.json()["data"][0]["embedding"]
async def retrieve_chunks(
    db: AsyncSession,
    question_embedding: list[float],
    top_k: int = 5,
    source_types: list[str] | None = None,
) -> list[dict]:
    """Retrieve the most relevant chunks via pgvector cosine distance.

    Args:
        db: Async SQLAlchemy session.
        question_embedding: Query vector from embed_text().
        top_k: Number of chunks to return.
        source_types: Optional allow-list of rag_sources.source_type values.

    Security fix: the original interpolated ``source_types``, the embedding
    and the limit directly into the SQL f-string, allowing SQL injection via
    the source-type filter.  All dynamic values are now bound parameters;
    only the static presence/absence of the filter clause varies.
    """
    embedding_str = "[" + ",".join(str(x) for x in question_embedding) + "]"
    # The optional filter toggles a fixed clause; the values go in as params.
    source_filter = "AND rs.source_type = ANY(:source_types)" if source_types else ""
    query = text(f"""
        SELECT
            rc.id,
            rc.content,
            rc.metadata,
            rs.title,
            rs.source_type,
            1 - (rc.embedding <=> CAST(:emb AS vector)) AS relevance_score
        FROM rag_chunks rc
        JOIN rag_sources rs ON rs.id = rc.source_id
        WHERE rc.embedding IS NOT NULL
        {source_filter}
        ORDER BY rc.embedding <=> CAST(:emb AS vector)
        LIMIT :top_k
    """)
    params: dict = {"emb": embedding_str, "top_k": top_k}
    if source_types:
        params["source_types"] = source_types
    result = await db.execute(query, params)
    rows = result.fetchall()
    return [
        {
            "id": str(row.id),
            "content": row.content,
            "metadata": row.metadata,
            "title": row.title,
            "source_type": row.source_type,
            "relevance_score": float(row.relevance_score),
        }
        for row in rows
    ]
async def ask(
    db: AsyncSession,
    question: str,
    top_k: int = 5,
    source_types: list[str] | None = None,
) -> dict:
    """Full RAG pipeline: embed the question, retrieve chunks, generate an answer."""
    question_vector = await embed_text(question)
    chunks = await retrieve_chunks(db, question_vector, top_k, source_types)
    if not chunks:
        # No grounding material found: return the canned "not found" reply
        # instead of asking the model to answer without context.
        return {
            "question": question,
            "answer": "관련 자료를 찾을 수 없습니다. 더 구체적인 질문을 입력하거나, 관련 자료가 업로드되었는지 확인해주세요.",
            "sources": [],
        }
    answer = await complete(
        messages=[{"role": "user", "content": build_prompt(question, chunks)}],
        system=SYSTEM_PROMPT,
        temperature=0.5,
    )
    return {
        "question": question,
        "answer": answer,
        "sources": chunks,
    }
+99
View File
@@ -0,0 +1,99 @@
"""Weekly and monthly report generation."""
from app.services.ai_engine import complete
from app.services.prompts.report import (
WEEKLY_SYSTEM_PROMPT, MONTHLY_SYSTEM_PROMPT,
build_weekly_prompt, build_monthly_prompt,
)
async def generate_weekly_report(
    project_name: str,
    period_start: str,
    period_end: str,
    daily_reports: list,
    overall_progress_pct: float,
    weather_alerts: list,
) -> tuple[str, dict]:
    """
    Generate the weekly report text plus its structured summary.

    Returns:
        ``(ai_text, content_json)`` — the generated narrative and the period
        statistics dict stored alongside it.
    """
    daily_summaries = [
        {"date": str(report.report_date), "work_content": report.work_content or ""}
        for report in daily_reports
    ]
    weather_issues = [f"{alert.alert_date}: {alert.message}" for alert in weather_alerts]
    # Aggregate total head count across all daily reports.
    total_workers = 0
    for report in daily_reports:
        if report.workers_count:
            total_workers += sum(report.workers_count.values())
    prompt = build_weekly_prompt(
        project_name=project_name,
        period_start=period_start,
        period_end=period_end,
        daily_summaries=daily_summaries,
        overall_progress_pct=overall_progress_pct,
        weather_issues=weather_issues,
    )
    ai_text = await complete(
        messages=[{"role": "user", "content": prompt}],
        system=WEEKLY_SYSTEM_PROMPT,
        temperature=0.3,
    )
    content_json = {
        "period_start": period_start,
        "period_end": period_end,
        "overall_progress_pct": overall_progress_pct,
        "daily_count": len(daily_reports),
        "total_workers": total_workers,
        "weather_alert_count": len(weather_alerts),
    }
    return ai_text, content_json
async def generate_monthly_report(
    project_name: str,
    period_start: str,
    period_end: str,
    daily_reports: list,
    overall_progress_pct: float,
) -> tuple[str, dict]:
    """Generate the monthly report text and its structured summary."""
    # NOTE(review): every 7th daily report is sampled as a weekly proxy —
    # this is not a true calendar-week grouping; confirm that is intended.
    weekly_summaries = [
        f"- {report.report_date}: {report.work_content[:80]}..."
        for report in daily_reports[::7]
        if report.work_content
    ]
    prompt = build_monthly_prompt(
        project_name=project_name,
        period_start=period_start,
        period_end=period_end,
        weekly_summaries=weekly_summaries,
        overall_progress_pct=overall_progress_pct,
    )
    ai_text = await complete(
        messages=[{"role": "user", "content": prompt}],
        system=MONTHLY_SYSTEM_PROMPT,
        temperature=0.3,
    )
    content_json = {
        "period_start": period_start,
        "period_end": period_end,
        "overall_progress_pct": overall_progress_pct,
        "daily_count": len(daily_reports),
    }
    return ai_text, content_json
+202
View File
@@ -0,0 +1,202 @@
"""
기상청 Open API (KMA) integration.
Fetches short-term (단기예보) and medium-term (중기예보) forecasts.
"""
import httpx
from datetime import date, datetime, timedelta, timezone
from typing import Any
from app.config import settings
# Base URL and service key for the KMA (Korea Meteorological Administration)
# open API, both sourced from application settings.
KMA_BASE = settings.KMA_BASE_URL
API_KEY = settings.KMA_API_KEY
# Weather code -> Korean description
WEATHER_CODE_MAP = {
"1": "맑음", "2": "구름조금", "3": "구름많음",
"4": "흐림", "5": "", "6": "비눈", "7": "눈비",
"8": "",
}
async def fetch_short_term_forecast(nx: int, ny: int) -> list[dict]:
"""Fetch 단기예보 (3-day, 3-hour interval)."""
now = datetime.now(timezone.utc).astimezone()
# KMA issues forecasts at 02, 05, 08, 11, 14, 17, 20, 23
base_hours = [2, 5, 8, 11, 14, 17, 20, 23]
current_hour = now.hour
base_hour = max([h for h in base_hours if h <= current_hour], default=23)
base_date = now.strftime("%Y%m%d") if current_hour >= 2 else (now - timedelta(days=1)).strftime("%Y%m%d")
base_time = f"{base_hour:02d}00"
params = {
"serviceKey": API_KEY,
"pageNo": 1,
"numOfRows": 1000,
"dataType": "JSON",
"base_date": base_date,
"base_time": base_time,
"nx": nx,
"ny": ny,
}
async with httpx.AsyncClient(timeout=30.0) as client:
resp = await client.get(f"{KMA_BASE}/getVilageFcst", params=params)
resp.raise_for_status()
data = resp.json()
items = data.get("response", {}).get("body", {}).get("items", {}).get("item", [])
return _parse_short_term(items)
def _parse_short_term(items: list[dict]) -> list[dict]:
"""Parse KMA short-term forecast items into daily summaries."""
daily: dict[str, dict] = {}
for item in items:
fcst_date = item.get("fcstDate", "")[:8] # YYYYMMDD
category = item.get("category", "")
value = item.get("fcstValue", "")
if fcst_date not in daily:
daily[fcst_date] = {
"date": f"{fcst_date[:4]}-{fcst_date[4:6]}-{fcst_date[6:]}",
"temp_max": None, "temp_min": None,
"precipitation": 0.0, "wind_speed": None,
"sky": None, "pty": None,
}
d = daily[fcst_date]
if category == "TMX" and value != "-":
d["temp_max"] = float(value)
elif category == "TMN" and value != "-":
d["temp_min"] = float(value)
elif category == "PCP" and value not in ("-", "강수없음"):
try:
d["precipitation"] = max(d["precipitation"], float(value.replace("mm", "").strip()))
except ValueError:
pass
elif category == "WSD":
try:
ws = float(value)
if d["wind_speed"] is None or ws > d["wind_speed"]:
d["wind_speed"] = ws
except ValueError:
pass
elif category == "SKY":
d["sky"] = value
elif category == "PTY":
d["pty"] = value
result = []
for fcst_date in sorted(daily.keys()):
d = daily[fcst_date]
weather_code = d.get("pty") or d.get("sky") or "1"
result.append({
"date": d["date"],
"temperature_high": d["temp_max"],
"temperature_low": d["temp_min"],
"precipitation_mm": d["precipitation"],
"wind_speed_ms": d["wind_speed"],
"weather_code": weather_code,
"weather_desc": WEATHER_CODE_MAP.get(str(weather_code), "알 수 없음"),
})
return result
# --- Weather Constraint Evaluation ---
# Default constraints by work type code
DEFAULT_CONSTRAINTS: dict[str, dict] = {
"CONCRETE": {"min_temp": 5.0, "max_wind": None, "no_rain": True},
"HIGH_WORK": {"min_temp": None, "max_wind": 10.0, "no_rain": False},
"ASPHALT": {"min_temp": 10.0, "max_wind": None, "no_rain": True},
"EARTHWORK": {"min_temp": None, "max_wind": None, "no_rain": True},
"REBAR": {"min_temp": None, "max_wind": None, "no_rain": False},
}
def evaluate_weather_alerts(
forecast: dict,
tasks_on_date: list,
work_type_constraints: dict[str, dict] | None = None,
) -> list[dict]:
"""
Evaluate weather constraints for tasks on a given date.
Returns list of alert dicts.
"""
alerts = []
constraints = work_type_constraints or DEFAULT_CONSTRAINTS
for task in tasks_on_date:
# Determine work type from task name (simple keyword matching)
work_type = _detect_work_type(task.name)
if not work_type or work_type not in constraints:
continue
constraint = constraints[work_type]
temp_low = forecast.get("temperature_low")
wind_speed = forecast.get("wind_speed_ms")
precipitation = forecast.get("precipitation_mm", 0)
# Check temperature
if constraint.get("min_temp") and temp_low is not None:
if temp_low < constraint["min_temp"]:
alerts.append({
"task_id": str(task.id),
"alert_date": forecast.get("date"),
"alert_type": f"cold_{work_type.lower()}",
"severity": "critical" if temp_low < constraint["min_temp"] - 5 else "warning",
"message": (
f"[{task.name}] 최저기온 {temp_low}°C - "
f"{work_type} 작업 기준온도({constraint['min_temp']}°C) 미달. "
f"작업 조정 검토 필요."
),
})
# Check wind
if constraint.get("max_wind") and wind_speed is not None:
if wind_speed > constraint["max_wind"]:
alerts.append({
"task_id": str(task.id),
"alert_date": forecast.get("date"),
"alert_type": f"wind_{work_type.lower()}",
"severity": "critical",
"message": (
f"[{task.name}] 풍속 {wind_speed}m/s - "
f"허용 최대풍속({constraint['max_wind']}m/s) 초과. "
f"고소작업 중단 검토."
),
})
# Check rain
if constraint.get("no_rain") and precipitation and precipitation > 1.0:
alerts.append({
"task_id": str(task.id),
"alert_date": forecast.get("date"),
"alert_type": f"rain_{work_type.lower()}",
"severity": "warning",
"message": (
f"[{task.name}] 강수 예보 {precipitation}mm - "
f"{work_type} 작업 우천 시 제한. 공정 조정 검토."
),
})
return alerts
def _detect_work_type(task_name: str) -> str | None:
"""Simple keyword-based work type detection from task name."""
name_lower = task_name.lower()
if any(k in name_lower for k in ["콘크리트", "타설", "레미콘"]):
return "CONCRETE"
if any(k in name_lower for k in ["고소", "크레인", "비계", "거푸집"]):
return "HIGH_WORK"
if any(k in name_lower for k in ["아스팔트", "포장"]):
return "ASPHALT"
if any(k in name_lower for k in ["성토", "절토", "굴착", "토공"]):
return "EARTHWORK"
if any(k in name_lower for k in ["철근", "배근"]):
return "REBAR"
return None