import requests
import json
import openpyxl
from openpyxl import Workbook
from urllib.parse import quote

def get_all_types_and_regions():
    """Fetch Douban's recommend-category metadata (genre / region filters).

    Returns:
        list: the ``recommend_categories`` payload from the API, or an
        empty list on any network or JSON-decoding failure.
    """
    base_url = "https://m.douban.com/rexxar/api/v2/movie/recommend"
    headers = {
        "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1",
        "Referer": "https://m.douban.com/movie/",
    }
    # Leaving the genre blank selects "all" categories, which makes the
    # API return its recommend_categories metadata alongside the items.
    initial_params = {
        "refresh": 0,
        "start": 0,
        "count": 20,
        "selected_categories": '{"类型":""}',
        "uncollect": "false",
        "score_range": "0,10",
        "tags": ""
    }

    try:
        # timeout so a stalled connection cannot hang the whole script
        response = requests.get(base_url, params=initial_params,
                                headers=headers, timeout=10)
        response.raise_for_status()
        data = response.json()
        return data.get("recommend_categories", [])
    except requests.exceptions.RequestException as e:
        print(f"请求失败: {e}")
        return []
    except json.JSONDecodeError:
        print("响应内容不是有效的JSON格式")
        return []


def get_all_movies(type):
    """Fetch every recommended movie for a single genre, paging through the API.

    Args:
        type: genre name (e.g. "喜剧").  NOTE: shadows the builtin ``type``;
            the parameter name is kept for backward compatibility with callers.

    Returns:
        list[dict]: raw API items filtered to ``item["type"] == "movie"``;
        an empty list on any request or JSON-decoding failure.
    """
    base_url = "https://m.douban.com/rexxar/api/v2/movie/recommend"
    headers = {
        "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1",
        "Referer": "https://m.douban.com/movie/",
    }

    def _page_params(start, count):
        # Shared query builder: every page uses the same genre selection.
        return {
            "refresh": 0,
            "start": start,
            "count": count,
            "selected_categories": '{"类型":"' + type + '"}',
            "uncollect": "false",
            "score_range": "0,10",
            "tags": type
        }

    try:
        # First request: learn the total record count for this genre.
        # timeout so a stalled connection cannot hang the whole script.
        response = requests.get(base_url, params=_page_params(0, 20),
                                headers=headers, timeout=10)
        response.raise_for_status()
        data = response.json()

        total = data.get("total", 0)
        print(f"总共有 {total} 条记录")

        all_items = data.get("items", [])

        # Page through the remaining records in fixed-size batches.
        batch_size = 20
        for start in range(batch_size, total, batch_size):
            response = requests.get(base_url,
                                    params=_page_params(start, batch_size),
                                    headers=headers, timeout=10)
            response.raise_for_status()
            batch_data = response.json()
            all_items.extend(batch_data.get("items", []))
            print(f"类型：{type} 已获取 {len(all_items)}/{total} 条记录")

        # Drop non-movie entries (the feed mixes in TV shows etc.).
        all_items = [item for item in all_items if item.get("type") == "movie"]

        return all_items

    except requests.exceptions.RequestException as e:
        print(f"请求失败: {e}")
        return []
    except json.JSONDecodeError:
        print("响应内容不是有效的JSON格式")
        return []


def parse_movie_info(movie):
    """Normalize one raw API item into a flat movie-info dict.

    The "card_subtitle" field is "year / region / genres / directors"
    separated by "/"; missing trailing parts degrade to empty values.

    Args:
        movie: raw item dict from the recommend API.

    Returns:
        dict with keys title, year, region, directors, types (list),
        rating, pic_url — all strings except ``types``.
    """
    title = movie.get("title", "")
    # "pic"/"rating" may be present but explicitly null in the payload;
    # `or {}` guards against `.get` on None in that case.
    pic_normal = (movie.get("pic") or {}).get("normal", "")
    rating = (movie.get("rating") or {}).get("value", "")

    # Split the subtitle into its positional parts.
    subtitle_parts = movie.get("card_subtitle", "").split("/")
    year = region = directors = ""
    types = []

    if len(subtitle_parts) >= 1:
        year = subtitle_parts[0].strip()
    if len(subtitle_parts) >= 2:
        region = subtitle_parts[1].strip()
    if len(subtitle_parts) >= 3:
        types_str = subtitle_parts[2].strip()
        # Genres are space-separated inside their segment.
        types = [t.strip() for t in types_str.split() if t.strip()]
    if len(subtitle_parts) >= 4:
        directors = subtitle_parts[3].strip()

    return {
        "title": title,
        "year": year,
        "region": region,
        "directors": directors,
        "types": types,
        "rating": rating,
        "pic_url": pic_normal
    }


# 从推荐的信息里获取推荐的类型和推荐的地区名称
def extract_categories(recommend_categories):
    type_list = []
    region_list = []

    for category in recommend_categories:
        if category.get("type") == "类型":
            for item in category.get("data", []):
                if "text" in item and item["text"] != "全部":
                    type_list.append(item["text"])
        elif category.get("type") == "地区":
            for item in category.get("data", []):
                if "text" in item and item["text"] != "全部":
                    region_list.append(item["text"])

    return type_list, region_list


def save_to_excel(movies_data, type_list, region_list, filename="douban_movies.xlsx"):
    """Write movie rows plus genre/region lists into a three-sheet workbook.

    Args:
        movies_data: list of dicts produced by ``parse_movie_info``.
        type_list: genre names for the "类型列表" sheet.
        region_list: region names for the "地区列表" sheet.
        filename: output path of the .xlsx file.
    """
    wb = Workbook()

    # Sheet 1: the movie data itself (the workbook's default sheet).
    ws_movies = wb.active
    ws_movies.title = "电影数据"

    headers = ["电影名称", "年份", "地区", "导演", "类型", "评分", "封面URL"]
    ws_movies.append(headers)

    for movie in movies_data:
        row = [
            movie["title"],
            movie["year"],
            movie["region"],
            movie["directors"],
            " ".join(movie["types"]),  # genre list flattened to one cell
            movie["rating"],
            movie["pic_url"]
        ]
        ws_movies.append(row)

    # Sheet 2: all genres, sorted.
    ws_types = wb.create_sheet("类型列表")
    ws_types.append(["所有类型"])
    for item in sorted(type_list):
        ws_types.append([item])

    # Sheet 3: all regions, sorted.
    ws_regions = wb.create_sheet("地区列表")
    ws_regions.append(["所有地区"])
    for item in sorted(region_list):
        ws_regions.append([item])

    wb.save(filename)
    # BUGFIX: previously printed the literal "(unknown)" instead of the path.
    print(f"数据已保存到 {filename}")


def main():
    """Crawl Douban recommendations for every genre, dedupe, sort, and export."""
    # Discover the genre and region vocabularies first.
    recommend_categories = get_all_types_and_regions()
    print(recommend_categories)
    type_list, region_list = extract_categories(recommend_categories)
    print(f"\n所有类型({len(type_list)}种):", type_list)
    print(f"\n所有地区({len(region_list)}个):", region_list)

    movies_data = []
    # O(1) dedupe keys instead of rescanning movies_data per item (was O(n^2)).
    # Same title can legitimately repeat across year/region (e.g. 情圣:
    # 1991 vs 2016), so the key uses all three fields.
    seen = set()
    for movie_type in type_list:  # renamed: avoid shadowing builtin `type`
        all_items = get_all_movies(movie_type)
        if not all_items:
            print(f"类型{movie_type} 没有获取到电影数据")

        for item in all_items:
            movie_info = parse_movie_info(item)
            key = (movie_info["title"], movie_info["year"], movie_info["region"])
            if key in seen:
                print(f"重复：{movie_info['title']}")
            else:
                seen.add(key)
                movies_data.append(movie_info)

    # Sort by rating, descending; blank ratings are treated as 0.
    movies_data.sort(key=lambda x: float(x["rating"]) if x["rating"] else 0, reverse=True)

    save_to_excel(movies_data, type_list, region_list)


def import_to_database(filename="douban_movies.xlsx", table_name="movies"):
    """Load the exported Excel workbook into a MySQL table via SQLAlchemy.

    Creates the table if needed, then bulk-inserts every row of the
    "电影数据" sheet.  Rolls back and reports on any failure.

    Args:
        filename: path of the workbook produced by ``save_to_excel``.
        table_name: destination table name.
    """
    import pandas as pd
    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, Numeric
    from sqlalchemy.orm import sessionmaker

    # SECURITY NOTE(review): credentials are hardcoded — move to
    # environment variables / a config file before deploying.
    HOSTNAME = "127.0.0.1"
    PORT = 3306
    USERNAME = "root"
    PASSWORD = "root"
    DATABASE = "imrs"
    db_uri = f"mysql+pymysql://{USERNAME}:{PASSWORD}@{HOSTNAME}:{PORT}/{DATABASE}?charset=utf8"
    # (removed dead `if not db_uri` check: an f-string is always truthy)

    # Read the movie sheet and blank out NaNs so inserts don't carry NULLs.
    df = pd.read_excel(filename, sheet_name="电影数据")
    df = df.fillna('')

    engine = create_engine(db_uri)
    metadata = MetaData()

    # Table schema mirroring the Excel columns.
    movies_table = Table(
        table_name, metadata,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('title', String(255)),
        Column('year', Integer),
        Column('region', String(100)),
        Column('director', String(255)),
        Column('type', String(100)),
        Column('rating', Numeric(10, 1)),
        Column('poster', String(255))
    )

    metadata.create_all(engine)

    Session = sessionmaker(bind=engine)
    session = Session()

    try:
        # One executemany-style bulk insert instead of a round trip per row.
        rows = [
            {
                "title": row['电影名称'],
                "year": row['年份'],
                "region": row['地区'],
                "director": row['导演'],
                "type": row['类型'],
                "rating": row['评分'],
                "poster": row['封面URL'],
            }
            for _, row in df.iterrows()
        ]
        if rows:
            session.execute(movies_table.insert(), rows)

        session.commit()
        print(f"成功导入 {len(df)} 条电影数据到数据库")
    except Exception as e:
        session.rollback()
        print(f"导入失败: {e}")
    finally:
        session.close()
        # Release the connection pool as well, not just the session.
        engine.dispose()


# Script entry point: crawl everything into the Excel workbook first,
# then load that workbook into the MySQL database.
if __name__ == "__main__":
    main()
    import_to_database()