import json
import os
import random
from collections import Counter

# --- Configuration ---
# Total number of movie rows the script emits.
TOTAL_MOVIES = 1_000_000

# Word pools for synthesizing movie titles as "<prefix> of <suffix>".
# 20 x 20 = 400 distinct combinations, so duplicate titles are expected.
titles_prefix = ["Shadow", "Echo", "Galactic", "Crimson", "Silent", "Eternal", "Final", "Lost", "Rising", "Dark",
                 "Mystic", "Golden", "Broken", "Infinite", "Phantom", "Neon", "Frozen", "Solar", "Quantum", "Iron"]
titles_suffix = ["Dawn", "Horizon", "Legacy", "Warriors", "Destiny", "Empire", "Requiem", "Odyssey", "Chronicles", "Paradox",
                 "Revolution", "Sanctuary", "Vortex", "Eclipse", "Mirage", "Kingdom", "Protocol", "Exodus", "Awakening", "Fury"]

# Genre tags; each movie gets a random sample of 1-5 of these.
tags_pool = ["action", "comedy", "drama", "sci-fi", "fantasy", "horror", "romance", "thriller",
             "mystery", "animation", "documentary", "war", "western", "crime", "adventure"]

# Generate a unique, English-style author name per seed.
def generate_author_name(seed):
    """Return a unique, deterministic English-style author name for *seed*.

    The name is built by pure indexing (no RNG), which fixes two defects of
    the previous version: it reseeded the *global* ``random`` state on every
    call, and it could only ever produce 20 x 20 = 400 distinct names even
    though callers rely on uniqueness across hundreds of thousands of seeds.
    Names cycle through all first/last combinations; once the 400 distinct
    combinations are exhausted a numeric suffix keeps every name unique.
    """
    first_names = ["James", "Emma", "Liam", "Olivia", "Noah", "Ava", "William", "Sophia", "Lucas", "Isabella",
                   "Michael", "Mia", "Alexander", "Charlotte", "Daniel", "Amelia", "Henry", "Harper", "Jacob", "Evelyn"]
    last_names = ["Smith", "Johnson", "Williams", "Brown", "Jones", "Garcia", "Miller", "Davis", "Rodriguez", "Martinez",
                  "Hernandez", "Lopez", "Gonzalez", "Wilson", "Anderson", "Thomas", "Taylor", "Moore", "Jackson", "Martin"]
    first = first_names[seed % len(first_names)]
    last = last_names[(seed // len(first_names)) % len(last_names)]
    # How many times the full combination space has been exhausted.
    suffix = seed // (len(first_names) * len(last_names))
    return f"{first} {last}" if suffix == 0 else f"{first} {last} {suffix}"

# === Step 1: construct the per-author movie-count distribution ===
#
# We do not know the author count up front, so we work backwards.
# With A authors the targets imply roughly:
#   0.01A * ~15 + 0.04A * ~10 + 0.11A * 3.5 + 0.19A * 2 + 0.65A * 1 ≈ 1,000,000
# so A lands in the neighborhood of 600k (most authors have a single film).
# Empirically ~650,000 authors satisfy 1M movies plus the distribution targets.
ESTIMATED_AUTHORS = 650_000

num_super = int(0.01 * ESTIMATED_AUTHORS)      # super-prolific: >=15 movies
num_high5 = int(0.04 * ESTIMATED_AUTHORS)      # prolific: 5-14 movies
num_mid3 = int(0.11 * ESTIMATED_AUTHORS)       # mid-tier: 3-4 movies
num_double = int(0.19 * ESTIMATED_AUTHORS)     # exactly 2 movies
num_single = ESTIMATED_AUTHORS - (num_super + num_high5 + num_mid3 + num_double)

# Build one movies-per-author count, then reconcile the sum to exactly
# TOTAL_MOVIES without destroying the distribution targets.
author_movie_counts = (
    [random.randint(15, 50) for _ in range(num_super)]   # super-prolific: 15-50
    + [random.randint(5, 14) for _ in range(num_high5)]  # prolific: 5-14
    + [random.randint(3, 4) for _ in range(num_mid3)]    # mid-tier: 3-4
    + [2] * num_double                                   # exactly two
    + [1] * num_single                                   # single-movie authors
)

total_so_far = sum(author_movie_counts)

# With these band sizes the raw sum overshoots 1M by design, so we trim.
if total_so_far > TOTAL_MOVIES:
    diff = total_so_far - TOTAL_MOVIES
    # 1) Shrink prolific authors, but never below their band's minimum
    #    (15 / 5 / 3) so the ">=15", ">=5" and ">=3" share targets survive.
    #    (The previous code trimmed the front of the list all the way down
    #    to 1, which wiped out every super-prolific author.)
    floors = [15] * num_super + [5] * num_high5 + [3] * num_mid3
    for i, floor in enumerate(floors):
        if diff <= 0:
            break
        cut = min(author_movie_counts[i] - floor, diff)
        author_movie_counts[i] -= cut
        diff -= cut
    # 2) Absorb the remainder by dropping single-movie authors off the tail
    #    (they were appended last, so the tail is all 1s).
    while diff > 0 and author_movie_counts and author_movie_counts[-1] == 1:
        author_movie_counts.pop()
        diff -= 1
elif total_so_far < TOTAL_MOVIES:
    # Pad the shortfall with extra single-movie authors.
    author_movie_counts.extend([1] * (TOTAL_MOVIES - total_so_far))

# Sanity check: the counts must now sum to exactly TOTAL_MOVIES.
final_total = sum(author_movie_counts)
assert final_total == TOTAL_MOVIES, f"Total movies mismatch: {final_total}"

# === Step 2: generate the movie records ===
print(f"✅ 总作者数: {len(author_movie_counts)}")
print(f"✅ 总电影数: {final_total}")

# One unique author name per count entry; the list index doubles as the seed.
authors = list(map(generate_author_name, range(len(author_movie_counts))))

# === Step 3: write the INSERT statements ===
# open() does not create missing directories — without this the script dies
# with FileNotFoundError whenever ./sql does not already exist.
os.makedirs("sql", exist_ok=True)

with open("sql/insert_t_movie_1M.sql", "w", encoding="utf-8") as f:
    f.write("-- MySQL dump for t_movie (1 million records)\n")
    f.write("SET autocommit=0;\n")
    f.write("START TRANSACTION;\n")

    movie_id = 1
    batch_size = 5000  # rows per multi-row INSERT statement
    batch = []

    # Emit `count` movies for each author, batching rows to keep each
    # INSERT statement a manageable size for MySQL.
    for author, count in zip(authors, author_movie_counts):
        for _ in range(count):
            name = f"{random.choice(titles_prefix)} of {random.choice(titles_suffix)}"
            tag_list = random.sample(tags_pool, k=random.randint(1, min(5, len(tags_pool))))
            tag_json = json.dumps(tag_list, ensure_ascii=False)
            # Escape single quotes for SQL string literals (defensive; the
            # word pools above contain none).
            name_escaped = name.replace("'", "''")
            author_escaped = author.replace("'", "''")
            batch.append(f"({movie_id}, '{name_escaped}', '{author_escaped}', '{tag_json}')")
            movie_id += 1

            if len(batch) >= batch_size:
                f.write("INSERT INTO t_movie (id, name, author, tag) VALUES\n")
                f.write(",\n".join(batch) + ";\n")
                batch = []

    # Flush the final partial batch.
    if batch:
        f.write("INSERT INTO t_movie (id, name, author, tag) VALUES\n")
        f.write(",\n".join(batch) + ";\n")

    f.write("COMMIT;\n")
    f.write("-- Done.\n")

print("✅ 已生成 insert_t_movie_1M.sql 文件（100万条数据）")

# Sanity-check the realized distribution against the design targets.
counts = author_movie_counts
total_authors = len(counts)

def _share_at_least(threshold):
    # Fraction of authors with at least `threshold` movies.
    return sum(1 for c in counts if c >= threshold) / total_authors

pct_multi = _share_at_least(2)
pct_ge3 = _share_at_least(3)
pct_ge5 = _share_at_least(5)
pct_ge15 = _share_at_least(15)

print(f"≥2 部: {pct_multi:.2%} (目标 ≥30%)")
print(f"≥3 部: {pct_ge3:.2%} (目标 ≥15%)")
print(f"≥5 部: {pct_ge5:.2%} (目标 ≥5%)")
print(f"≥15 部: {pct_ge15:.2%} (目标 ≥1%)")