feat: 쇼츠 품질 모듈 4종 파이프라인 연결

- MotionEngine: stock_fetcher에서 kenburns 대신 7패턴 모션 적용
- HookOptimizer: 스크립트 추출 후 훅 점수 평가 및 최적화
- CaptionTemplates: 코너별 자막 템플릿 매핑 (AI인사이트→brand_4thpath 등)
- ResilientAssembler: 클립별 개별 인코딩 + GPU 자동 감지
- video_assembler work_dir mkdir 누락 버그 수정

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
JOUNGWOOK KWON
2026-04-06 10:02:14 +09:00
parent fb5e6ddbdf
commit 93b2d3a264
4 changed files with 41 additions and 14 deletions

View File

@@ -47,10 +47,20 @@ CAPTION_TEMPLATES = {
# Corner → caption template mapping.
# Keys are blog corner names (Korean); values are caption-template identifiers
# consumed by the caption renderer. Unknown corners should fall back to the
# renderer's default template — this map is lookup-only.
CORNER_CAPTION_MAP = {
    # Current blog corners
    'AI인사이트': 'brand_4thpath',
    '여행맛집': 'tiktok_viral',
    '스타트업': 'hormozi',
    '제품리뷰': 'hormozi',
    '생활꿀팁': 'tiktok_viral',
    '앱추천': 'brand_4thpath',
    '재테크절약': 'hormozi',
    '재테크': 'hormozi',  # alias of '재테크절약' kept for shorter corner name
    # '팩트체크' appears in both current and legacy corner sets; listed once
    # here (duplicate dict-literal keys are silently overwritten in Python).
    '팩트체크': 'brand_4thpath',
    # Legacy corners (backward compatibility)
    '쉬운세상': 'hormozi',
    '숨은보물': 'tiktok_viral',
    '바이브리포트': 'hormozi',
    '한컷': 'tiktok_viral',
    '웹소설': 'brand_4thpath',
}

View File

@@ -278,6 +278,10 @@ def fetch_clips(
expressions = manifest.get('expressions', [])
char_pose = manifest.get('pose', manifest.get('character', {}).get('default_pose', ''))
# MotionEngine: 정지 이미지에 7가지 모션 패턴 적용 (직전 2개 제외 자동 선택)
from shorts.motion_engine import MotionEngine
motion = MotionEngine()
result_clips: list[Path] = []
# 1. 사용자 제공 비디오 클립
@@ -286,23 +290,24 @@ def fetch_clips(
if _prepare_clip(Path(user_clip), out):
result_clips.append(out)
# 2. 사용자 제공 이미지 → Ken Burns
# 2. 사용자 제공 이미지 → MotionEngine (7패턴 자동 선택)
for i, user_img in enumerate(manifest.get('user_images', [])[:max_clips]):
if len(result_clips) >= max_clips:
break
out = clips_dir / f'clip_img_{i+1:02d}.mp4'
if _kenburns_image(Path(user_img), out):
result_clips.append(out)
result_path = motion.apply(str(user_img), duration=6.0, output_path=str(out))
if result_path:
result_clips.append(Path(result_path))
# 3. 캐릭터 에셋 + 배경 합성
background = manifest.get('background', '')
if background and Path(background).exists() and len(result_clips) < max_clips:
# 배경 이미지 → Ken Burns 클립 (표정별 합성)
# 배경 이미지 → MotionEngine 클립 (표정별 합성)
for seg_idx, expr_png in enumerate(expressions[:3]):
if len(result_clips) >= max_clips:
break
out_bg = clips_dir / f'clip_bg_{seg_idx+1:02d}.mp4'
if _kenburns_image(Path(background), out_bg):
if motion.apply(str(background), duration=6.0, output_path=str(out_bg)):
# 표정 오버레이
if expr_png and Path(expr_png).exists():
out_char = clips_dir / f'clip_char_{seg_idx+1:02d}.mp4'
@@ -374,8 +379,9 @@ def fetch_clips(
while len(result_clips) < min_clips:
stock_idx += 1
out = clips_dir / f'clip_fallback_{stock_idx:02d}.mp4'
if _kenburns_image(fallback_img, out):
result_clips.append(out)
result_path = motion.apply(str(fallback_img), duration=6.0, output_path=str(out))
if result_path:
result_clips.append(Path(result_path))
else:
break

View File

@@ -339,7 +339,7 @@ def assemble(
tmp_cleanup = work_dir is None
if work_dir is None:
work_dir = output_dir / f'_work_{timestamp}'
work_dir.mkdir(parents=True, exist_ok=True)
work_dir.mkdir(parents=True, exist_ok=True)
try:
# ── 루프 최적화: 클립 목록 끝에 첫 클립 추가 ──────────────

View File

@@ -160,7 +160,8 @@ def produce(article: dict, dry_run: bool = False, cfg: Optional[dict] = None) ->
from shorts.stock_fetcher import fetch_clips
from shorts.tts_engine import generate_tts
from shorts.caption_renderer import render_captions
from shorts.video_assembler import assemble
from shorts.video_assembler import ResilientAssembler
from shorts.hook_optimizer import HookOptimizer
if cfg is None:
cfg = _load_config()
@@ -192,6 +193,14 @@ def produce(article: dict, dry_run: bool = False, cfg: Optional[dict] = None) ->
manifest = resolve(article, script=script, cfg=cfg)
result.steps_completed.append('script_extract')
# ── STEP 1.5: Hook Optimization ─────────────────────────
hook_optimizer = HookOptimizer(threshold=70)
original_hook = script.get('hook', '')
optimized_hook = hook_optimizer.optimize(original_hook, article)
if optimized_hook != original_hook:
script['hook'] = optimized_hook
logger.info(f'[{article_id}] 훅 최적화: "{original_hook[:20]}" → "{optimized_hook[:20]}"')
# ── STEP 2: Visual Sourcing ──────────────────────────────
logger.info(f'[{article_id}] STEP 2: Visual Sourcing')
clips = fetch_clips(script, manifest, clips_dir, ts, cfg=cfg)
@@ -227,12 +236,14 @@ def produce(article: dict, dry_run: bool = False, cfg: Optional[dict] = None) ->
logger.info(f'[{article_id}] STEP 4: Caption Rendering')
from shorts.tts_engine import _get_wav_duration
wav_dur = _get_wav_duration(tts_wav)
ass_path = render_captions(script, timestamps, captions_dir, ts, wav_dur, cfg=cfg)
corner = article.get('corner', '')
ass_path = render_captions(script, timestamps, captions_dir, ts, wav_dur, cfg=cfg, corner=corner)
result.steps_completed.append('caption_render')
# ── STEP 5: Video Assembly ───────────────────────────────
logger.info(f'[{article_id}] STEP 5: Video Assembly')
video_path = assemble(clips, tts_wav, ass_path, rendered_dir, ts, cfg=cfg)
# ── STEP 5: Video Assembly (ResilientAssembler + GPU 자동 감지) ──
logger.info(f'[{article_id}] STEP 5: Video Assembly (Resilient)')
assembler = ResilientAssembler(cfg=cfg)
video_path = assembler.assemble_resilient(clips, tts_wav, ass_path, rendered_dir, ts)
result.video_path = str(video_path)
result.steps_completed.append('video_assemble')