import argparse
import os
import re
import pickle
from typing import Dict, List

import jieba
import numpy as np
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer


# Keyword lexicons: a dish string is flagged for a category when it contains
# ANY of the category's substrings (see keyword_flags). Matching is plain
# substring containment, so short keys like "烧" also match "烧烤".
COOKING_KEYWORDS = {
    # NOTE(review): "香辣" means "spicy-hot", not "fried" — confirm intent.
    "fried": ["炸", "酥", "炸鸡", "椒盐", "香酥", "香炸", "香辣"],
    "steamed": ["蒸", "清蒸", "小蒸", "水蒸"],
    "braised": ["红烧", "烧", "卤", "焖", "酱"],  # removed duplicate "焖"
    "stirfry": ["炒", "炒蛋", "干锅", "小炒", "爆", "煸", "爆炒"],
    "grilled": ["烤", "烤鸡", "烤肉", "烧烤"],
}

PROTEIN_KEYWORDS = {
    "beef": ["牛", "牛肉", "牛柳", "牛仔"],
    "pork": ["猪", "排", "肉", "五花"],
    "chicken": ["鸡", "鸡块", "鸡腿", "鸡翅"],
    "fish": ["鱼", "鱼片", "鱼排"],
    "shrimp": ["虾", "海鲜", "虾仁"],
    "tofu": ["豆腐", "百叶", "千页", "豆腐皮"],
    "egg": ["蛋", "蒸蛋", "炒蛋", "煎蛋"],
    "mushroom": ["菇", "蘑菇", "金针菇", "香菇"],
}

# Keys here already carry the "has_" prefix and are emitted as-is by
# assemble_features (no extra prefix added).
ALLERGEN_KEYWORDS = {
    "has_milk": ["酸奶", "牛奶", "奶"],
    "has_egg": ["蛋", "蒸蛋", "炒蛋"],
    "has_shellfish": ["虾", "蟹", "贝", "海鲜"],
    "has_peanut": ["花生"],
    "has_soy": ["酱油", "豆豉", "豆腐", "黄豆", "毛豆"],
}

# Simple normalization / synonym map used to fold common name variants into a
# single canonical token. Applied as substring replacement in build_all_dishes,
# in insertion order.
STANDARD_MAP = {
    "鸡块": "鸡",
    "鸡腿": "鸡",
    "鸡翅": "鸡",
    "牛柳": "牛",
    "五花肉": "猪",
    "红糖发糕": "发糕",
    "玉米发糕": "发糕",
    "桂花糕": "发糕",
    "白菜猪肉水饺": "水饺",
}


def read_table(path: str) -> pd.DataFrame:
    """Load a menu table from *path*.

    Tries CSV as UTF-8 first, then GBK (common for Chinese spreadsheets
    exported on Windows); anything that still fails to parse as CSV falls
    back to the Excel reader.

    Raises:
        FileNotFoundError: immediately for a missing file, instead of the
            original behavior of swallowing it in a broad ``except`` and
            surfacing a confusing Excel-reader error.
    """
    if not os.path.exists(path):
        raise FileNotFoundError(path)
    try:
        return pd.read_csv(path, encoding="utf-8")
    except UnicodeDecodeError:
        # Bytes are not valid UTF-8 — retry with the legacy Chinese codec.
        try:
            return pd.read_csv(path, encoding="gbk")
        except Exception:
            return pd.read_excel(path, engine="openpyxl")
    except Exception:
        # Not parseable as CSV at all (e.g. an .xlsx binary) — try Excel.
        return pd.read_excel(path, engine="openpyxl")


def normalize_text(s: str) -> str:
    """Coerce *s* to a cleaned string.

    NaN/None become ""; Chinese list separators ("，", "、") are unified to
    ","; runs of whitespace collapse to a single space; result is stripped.
    """
    if pd.isna(s):
        return ""
    text = str(s).replace("，", ",").replace("、", ",")
    return re.sub(r"\s+", " ", text).strip()


def build_all_dishes(df: pd.DataFrame, dish_cols: List[str]) -> pd.Series:
    """Concatenate each row's dish columns into one space-separated token string.

    Every cell is normalized, cells that are empty or literally "无" ("none")
    are dropped, the rest are split on commas/whitespace, and each token has
    STANDARD_MAP variants replaced (substring replacement, in map order) by
    their canonical form.
    """
    splitter = re.compile(r"[,，、\s]+")

    def canonicalize(token: str) -> str:
        # Substring replacement, not exact match: e.g. "红烧鸡块" -> "红烧鸡".
        for variant, canonical in STANDARD_MAP.items():
            if variant in token:
                token = token.replace(variant, canonical)
        return token

    def join_row(row) -> str:
        tokens: List[str] = []
        for col in dish_cols:
            cell = normalize_text(row.get(col, ""))
            if not cell or cell == "无":
                continue
            for piece in splitter.split(cell):
                piece = piece.strip()
                if piece:
                    tokens.append(canonicalize(piece))
        return " ".join(tokens)

    return df.apply(join_row, axis=1)


def keyword_flags(text: str, keywords: Dict[str, List[str]]) -> Dict[str, int]:
    """Return {category: 0/1} — 1 when *text* contains any of the category's keywords.

    Uses plain substring containment; key order of *keywords* is preserved.
    """
    return {
        category: int(any(kw in text for kw in kwlist))
        for category, kwlist in keywords.items()
    }


def extract_counts(text: str) -> Dict[str, float]:
    """Compute simple count statistics over a space-separated dish string.

    Returns a dict with the number of tokens, the number of distinct tokens,
    and the mean token length (0.0 for an empty string).

    Fixes: return annotation was ``Dict[str, int]`` although
    ``avg_dish_name_len`` is a float; also drops the redundant empty-token
    filter (``str.split()`` with no argument never yields empty strings).
    """
    tokens = text.split()
    if tokens:
        avg_len = sum(len(t) for t in tokens) / len(tokens)
    else:
        avg_len = 0.0
    return {
        "num_dishes_nonempty": len(tokens),
        "num_unique_dishes": len(set(tokens)),
        "avg_dish_name_len": float(avg_len),
    }


def compute_topk_onehot(series: pd.Series, k: int = 50):
    """Build 0/1 indicator columns for the *k* most frequent dish tokens.

    Args:
        series: space-separated dish strings (one row per meal).
        k: number of top tokens to one-hot encode.

    Returns:
        (DataFrame of ``contains_<token>`` int columns, list of top-k tokens).

    Note: the indicator is substring containment over the whole row string,
    so "contains_鸡" also fires for "鸡腿".
    """
    tokens = series.str.split().explode()
    topk = tokens.value_counts().head(k).index.tolist()
    indicator_cols = {}
    for dish in topk:
        # regex=False: dish names may contain regex metacharacters
        # (e.g. "(", "+"), which previously made str.contains raise or
        # match the wrong rows.
        indicator_cols[f"contains_{dish}"] = series.str.contains(dish, regex=False).astype(int)
    return pd.DataFrame(indicator_cols), topk


def jieba_tokenizer(s: str):
    """Segment Chinese text into a token list.

    Defined at module level (not as a lambda) so a TfidfVectorizer built
    with it stays picklable.
    """
    return list(jieba.cut(s))


def compute_tfidf(series: pd.Series, dim: int = 100):
    """Vectorize dish strings with TF-IDF, optionally SVD-reduced to *dim*.

    Returns (tfidf DataFrame with ``tfidf_<i>`` columns, fitted vectorizer,
    fitted TruncatedSVD or None when no reduction was applied).
    """
    # Module-level jieba_tokenizer keeps the fitted vectorizer picklable.
    vectorizer = TfidfVectorizer(tokenizer=jieba_tokenizer, token_pattern=None)
    matrix = vectorizer.fit_transform(series.fillna(""))

    needs_reduction = dim is not None and dim > 0 and matrix.shape[1] > dim
    if not needs_reduction:
        dense = matrix.toarray()
        names = [f"tfidf_{i}" for i in range(dense.shape[1])]
        return pd.DataFrame(dense, columns=names), vectorizer, None

    svd = TruncatedSVD(n_components=dim, random_state=42)
    reduced = svd.fit_transform(matrix)
    names = [f"tfidf_{i}" for i in range(reduced.shape[1])]
    return pd.DataFrame(reduced, columns=names), vectorizer, svd


def assemble_features(df: pd.DataFrame, all_dishes_col: str = "all_dishes", top_k: int = 50, tfidf_dim: int = 50):
    """Build the full feature matrix from a menu DataFrame.

    Combines pass-through columns, dish-count statistics, cooking / protein /
    allergen keyword flags, top-k dish indicator columns, TF-IDF text
    features (optionally SVD-reduced), and two nutrition ratios.

    Args:
        df: input table; must contain *all_dishes_col*.
        all_dishes_col: name of the space-separated dish-string column.
        top_k: number of most frequent tokens to one-hot encode.
        tfidf_dim: SVD target dimension passed to compute_tfidf.

    Returns:
        (features DataFrame, fitted vectorizer, fitted SVD or None,
        list of top-k tokens).

    NOTE(review): assumes *df* has a default RangeIndex — the flag frames
    keep the original index while the topk/tfidf frames are reset before
    concat, so a non-default index would misalign rows. Confirm callers.
    """
    features = pd.DataFrame()
    # pass-through columns (ensure exist): missing ones become all-None
    # columns so the output schema is stable across inputs.
    for c in ["周次", "日期", "套餐类型", "热量", "蛋白质", "脂肪"]:
        if c in df.columns:
            features[c] = df[c]
        else:
            features[c] = None

    series = df[all_dishes_col].fillna("")
    # counts: token / unique-token / mean-length statistics per row
    counts = series.apply(extract_counts).apply(pd.Series)
    features = pd.concat([features, counts], axis=1)

    # cooking method flags
    cook_flags = series.apply(lambda s: pd.Series(keyword_flags(s, COOKING_KEYWORDS)))
    features = pd.concat([features, cook_flags.add_prefix("cook_")], axis=1)

    # protein flags
    # NOTE(review): the "has_" prefix turns the "egg" key into "has_egg",
    # which collides with the ALLERGEN_KEYWORDS key "has_egg" added below —
    # the output DataFrame ends up with two "has_egg" columns. Confirm
    # whether that duplication is intended.
    protein_flags = series.apply(lambda s: pd.Series(keyword_flags(s, PROTEIN_KEYWORDS)))
    features = pd.concat([features, protein_flags.add_prefix("has_")], axis=1)

    # allergen flags (keys already carry the "has_" prefix)
    allergen_flags = series.apply(lambda s: pd.Series(keyword_flags(s, ALLERGEN_KEYWORDS)))
    features = pd.concat([features, allergen_flags], axis=1)

    # top-k one-hot
    topk_df, topk_list = compute_topk_onehot(series, k=top_k)
    features = pd.concat([features, topk_df.reset_index(drop=True)], axis=1)

    # tfidf
    tfidf_df, vect, svd = compute_tfidf(series, dim=tfidf_dim)
    features = pd.concat([features, tfidf_df.reset_index(drop=True)], axis=1)

    # nutrition ratios; errstate suppresses divide-by-zero / NaN warnings,
    # so zero 热量 (energy) yields inf/NaN rather than raising.
    with np.errstate(divide="ignore", invalid="ignore"):
        features["protein_per_energy"] = features["蛋白质"] / features["热量"]
        features["fat_per_energy"] = features["脂肪"] / features["热量"]

    return features, vect, svd, topk_list


def main():
    """CLI entry point: load the menu table, derive features, persist outputs.

    Writes the feature CSV to --output and the fitted vectorizer / SVD /
    top-k token list into an ``artifacts`` directory next to it.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", required=True)
    parser.add_argument("--output", required=True)
    parser.add_argument("--top-k", type=int, default=50)
    parser.add_argument("--tfidf-dim", type=int, default=50)
    args = parser.parse_args()

    table = read_table(args.input)
    # Only the dish columns actually present in this table are used.
    known_cols = ["荤菜1", "荤菜2", "荤菜3", "素菜", "杂粮", "主食", "汤", "配餐"]
    dish_cols = [c for c in table.columns if c in known_cols]
    table["all_dishes"] = build_all_dishes(table, dish_cols)
    features, vect, svd, topk = assemble_features(
        table, all_dishes_col="all_dishes", top_k=args.top_k, tfidf_dim=args.tfidf_dim
    )

    out_dir = os.path.dirname(args.output)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir, exist_ok=True)
    # utf-8-sig so Excel opens the Chinese headers correctly.
    features.to_csv(args.output, index=False, encoding="utf-8-sig")

    # Persist fitted transformers so the pipeline can be re-applied later.
    art_dir = os.path.join(os.path.dirname(args.output), "artifacts")
    os.makedirs(art_dir, exist_ok=True)
    with open(os.path.join(art_dir, "vectorizer.pkl"), "wb") as f:
        pickle.dump(vect, f)
    if svd is not None:
        with open(os.path.join(art_dir, "svd.pkl"), "wb") as f:
            pickle.dump(svd, f)
    with open(os.path.join(art_dir, "topk.txt"), "w", encoding="utf-8") as f:
        for token in topk:
            f.write(token + "\n")


# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
