# -*- coding: utf-8 -*-

"""
自动下载美国同行业年报 → 跑风险指标 → 生成 iv_us.csv
"""

import requests, zipfile, io, os, json, pandas as pd, tqdm
from pathlib import Path
import extract as extract
import preprocess as pre
# import predict as predict

# --- Configuration -------------------------------------------------------
# SECURITY: an API token was previously hard-coded on this line (a leaked
# secret checked into source control). Read it from the environment instead,
# and rotate/revoke the old key immediately.
API_KEY = os.environ.get("SEC_API_TOKEN", "")

OUT = Path("data/us_raw")
# parents=True: on a fresh checkout "data/" may not exist yet; the previous
# exist_ok-only call would raise FileNotFoundError in that case.
OUT.mkdir(parents=True, exist_ok=True)

# 1. Pull 10-K filing lists per 2-digit SIC industry.
# ref/sic_china_us.csv: manually maintained China↔US industry mapping.
sic_map = pd.read_csv("ref/sic_china_us.csv")
sic_codes = sic_map['us_sic2'].dropna().unique()

# Download up to 10 filings per industry per year into data/us_raw/<year>_<sic>/.
# NOTE(review): SEC's submissions endpoint is keyed by CIK (not SIC), does not
# accept wildcards, and authenticates via a User-Agent header rather than a
# "token" query parameter — confirm this URL/query works against the live API.
for year in range(2003, 2023):
    for sic in sic_codes:
        url = f"https://api.sec.gov/submissions/CIK{sic}*.json"
        payload = {"token": API_KEY, "form": "10-K", "year": year}
        try:
            # timeout: without it requests can block forever on a stalled
            # connection and hang the whole pipeline.
            r = requests.get(url, params=payload, timeout=30)
        except requests.RequestException:
            continue  # network error: skip this industry/year, keep going
        if r.status_code != 200:
            continue
        try:
            filings = r.json()['filings']
        except (ValueError, KeyError):
            continue  # non-JSON body or unexpected schema
        for f in filings[:10]:   # first 10 firms per industry
            zip_url = f['documentUrl']
            try:
                zip_resp = requests.get(zip_url, timeout=60)
            except requests.RequestException:
                continue
            if zip_resp.status_code != 200:
                continue
            dest = OUT / f"{year}_{sic}"
            try:
                # context manager closes the archive handle promptly
                # (the original leaked one ZipFile per filing).
                with zipfile.ZipFile(io.BytesIO(zip_resp.content)) as z:
                    z.extractall(dest)
            except zipfile.BadZipFile:
                continue  # truncated/corrupt download: skip this filing

# 2. Reuse the domestic (China) text pipeline to compute the risk metrics.
extract.run(OUT)          # auto-detects pdf/htm/txt input formats
# presumably extract.run writes its text output to data/us_txt — TODO confirm
pre.run(Path("data/us_txt"))
# predict.run(Path("data/us_filtered"), prefix="us")

# 3. Aggregate firm-level risk into an industry-year panel (IV_US).
iv = pd.read_csv("outputs/us_risk_metrics.csv")
# Firm ids follow the download step's folder naming "<year>_<sic2>_...":
# split once and reuse, instead of re-splitting the column per field.
firm_parts = iv['firm'].str.split("_")
iv['year'] = firm_parts.str[0].astype(int)
iv['sic2'] = firm_parts.str[1].astype(int)
# Mean risk per (sic2, year); rename via method chain instead of inplace=True.
iv_us = (
    iv.groupby(['sic2', 'year'])['risk']
      .mean()
      .reset_index()
      .rename(columns={'risk': 'IV_US'})
)
iv_us.to_csv("outputs/iv_us.csv", index=False)
print("✅ IV_US 构建完成")
