import requests
from bs4 import BeautifulSoup
import json
import concurrent.futures
import threading # Optional, for potential advanced synchronization if needed, but ThreadPoolExecutor often abstracts this
import pandas as pd
from init_db import init_database, write_data


class Spider:
    """Scrapes game entries from the Steam store search API.

    Each page is fetched as JSON whose ``results_html`` field contains the
    rendered search rows; those rows are parsed into plain dicts which can be
    exported to CSV or written to SQLite via :meth:`save_to_database`.
    """

    def __init__(self):
        # Infinite-scroll search endpoint: sorted by review count, filtered
        # to titles supporting Simplified Chinese.
        self.url='https://store.steampowered.com/search/results/?query&count=50&dynamic_data=&sort_by=Reviews_DESC&snr=1_7_7_230_7&supportedlang=schinese&infinite=1'
        # self.url = 'https://store.steamchina.com/search/results/?query&count=50&dynamic_data=&sort_by=_ASC&snr=1_7_7_230_7&category1=998%2C996&infinite=1'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # One shared Session so TCP connections are pooled across requests.
        self.session = requests.Session()
        # Create the SQLite schema up front.
        init_database()

    def save_to_database(self, games_list, db_name='steam_games.db'):
        """Persist game dicts to SQLite, counting successful writes.

        Args:
            games_list: iterable of game-info dicts as built by scrape_page.
            db_name: SQLite file passed through to write_data.
        """
        success_count = 0
        for game in games_list:
            if write_data(game, db_name):
                success_count += 1
        print(f"成功保存 {success_count} 条数据到数据库")

    def scrape_page(self, start, count=100):
        """Fetch and parse one search-results page.

        Args:
            start: zero-based index of the first entry to request.
            count: number of entries to request per page (default 100,
                matching the driver's page size).

        Returns:
            A list of game-info dicts; empty on network/parse failure or
            when the page contains no entries, so the caller can use the
            result to decide whether to continue crawling.
        """
        print(f'数据从第{start}条数据开始爬起.........')
        params = {
            'start': start,
            'count': count
        }
        games_on_page = []  # games parsed from this page

        try:
            # Generous timeout: the endpoint can be slow under load.
            response = self.session.get(self.url, headers=self.headers, params=params, timeout=20)
            # Raise for 4xx/5xx status codes.
            response.raise_for_status()
            data = response.json()

            if data and data.get('success') == 1 and data.get('results_html'):
                soup = BeautifulSoup(data['results_html'], 'lxml')
                # Each game entry is an <a> tag with class 'search_result_row'.
                games = soup.find_all('a', class_='search_result_row')

                if games:
                    print(f"页面 {start} 找到 {len(games)} 个游戏条目。")
                    games_on_page = [self._parse_game(game) for game in games]

                return games_on_page

            else:
                print(f"请求失败或返回的数据格式不正确。URL: {self.url}, start: {start}")
                return []

        except requests.exceptions.RequestException as e:
            print(f"发生网络错误 (start={start}): {e}")
            return []
        except Exception as e:
            # Parsing/JSON errors for a single page should not kill the crawl.
            print(f"发生解析错误或其他错误 (start={start}): {e}")
            return []

    @staticmethod
    def _parse_game(game):
        """Build one game-info dict from a 'search_result_row' anchor tag."""
        link = game.get('href', "N/A")
        appid = game.get('data-ds-appid', "N/A")

        title_element = game.find('span', class_='title')
        game_name = title_element.text.strip() if title_element else "N/A"

        date_element = game.find('div', class_='col search_released responsive_secondrow')
        date_game = date_element.text.strip() if date_element else "N/A"

        image_element = game.find('img')
        photo_path = image_element.get('src', "N/A") if image_element else "N/A"

        final_price, original_price, discount = Spider._parse_price(game)
        review_text, review_score = Spider._parse_review(game)
        platform_text = Spider._parse_platforms(game)

        return {
            '游戏名称': game_name,
            '上架时间': date_game,
            '最终价格': final_price,
            '原价': original_price,
            '折扣': discount,
            '评价总结': review_text,
            '评价等级': review_score,
            '照片路径': photo_path,
            'appid': appid,
            '游戏链接': link,
            '游戏平台': platform_text,
        }

    @staticmethod
    def _parse_price(game):
        """Return (final_price, original_price, discount) display strings."""
        final_price = "N/A"
        original_price = "N/A"
        discount = "N/A"
        price_container = game.find('div', class_='col search_price_discount_combined responsive_secondrow')
        if price_container:
            discount_element = price_container.find('div', class_='discount_pct')
            if discount_element:
                # Discounted: original and final price live in separate tags.
                discount = discount_element.text.strip()
                original_price_element = price_container.find('span', class_='discount_original_price')
                if original_price_element:
                    original_price = original_price_element.text.strip()
                final_price_element = price_container.find('div', class_='discount_final_price')
                if final_price_element:
                    final_price = final_price_element.text.strip()
            else:
                # No discount: the container's text is the (single) price.
                final_price = price_container.text.strip()
                original_price = final_price

        # NOTE(review): a missing or blank price is treated as free-to-play,
        # so a paid game whose price failed to parse is also marked 免费 —
        # confirm this is acceptable for downstream analysis.
        if "Free" in final_price or final_price == "N/A" or not final_price.strip():
            final_price = "免费"
            original_price = "免费"
        return final_price, original_price, discount

    @staticmethod
    def _parse_review(game):
        """Return (review_text, review_score) from the review tooltip."""
        review_summary_element = game.find('span', class_='search_review_summary')
        review_tooltip_html = review_summary_element.get('data-tooltip-html', "N/A") if review_summary_element else "N/A"

        review_text = "N/A"
        review_score = "N/A"
        if review_tooltip_html != "N/A":
            tooltip_soup = BeautifulSoup(review_tooltip_html, 'lxml')
            # Drop <br> tags so get_text() yields one clean line.
            for br in tooltip_soup.find_all('br'):
                br.extract()
            review_text = tooltip_soup.get_text().strip()
            # Coarse sentiment bucket derived from the summary wording.
            if "好评" in review_text:
                review_score = "好评"
            elif "差评" in review_text:
                review_score = "差评"
            elif "褒贬不一" in review_text:
                review_score = "褒贬不一"
        return review_text, review_score

    @staticmethod
    def _parse_platforms(game):
        """Return a comma-joined platform string ("N/A" when none shown)."""
        platforms = []
        if game.find('span', class_='platform_img win'):
            platforms.append("Windows")
        if game.find('span', class_='platform_img mac'):
            platforms.append("Mac")
        if game.find('span', class_='platform_img linux'):
            platforms.append("Linux")
        return ", ".join(platforms) if platforms else "N/A"



if __name__ == '__main__':
    spider = Spider()
    all_games_list = []
    count_per_page = 100   # entries requested per page (matches scrape_page default)
    max_threads = 20       # worker threads; tune to taste
    target_count = 1500    # stop once this many games are collected

    # Pre-submit every page needed to reach target_count so the pool really
    # runs requests in parallel. (Submitting each page's successor only after
    # it completes keeps exactly one future in flight and serializes the
    # whole crawl, defeating the 20-worker pool.)
    num_pages = -(-target_count // count_per_page)  # ceil division
    page_results = {}  # start index -> list of game dicts

    with concurrent.futures.ThreadPoolExecutor(max_workers=max_threads) as executor:
        future_to_start = {
            executor.submit(spider.scrape_page, page * count_per_page): page * count_per_page
            for page in range(num_pages)
        }
        for future in concurrent.futures.as_completed(future_to_start):
            start = future_to_start[future]
            try:
                page_games = future.result()
            except Exception as exc:
                print(f'start={start} 的页面生成异常: {exc}')
                continue
            if page_games:
                page_results[start] = page_games
                print(f"页面 {start} 找到 {len(page_games)} 条游戏。")
            else:
                # Empty page: end of results or a failed request; scrape_page
                # already logged the cause.
                print(f"从start={start}开始的页面未找到游戏条目或请求失败。")

    # Reassemble in page order so the output is deterministic even though
    # pages complete in arbitrary order.
    for start in sorted(page_results):
        all_games_list.extend(page_results[start])

    if all_games_list:
        # Trim to target_count in case the last page overshot, then export.
        df = pd.DataFrame(all_games_list[:target_count])
        csv_filename = 'steam_games.csv'
        df.to_csv(csv_filename, index=False, encoding='utf-8')
        print(f"所有数据已爬取完成，共找到 {len(df)} 条游戏数据，已保存到 {csv_filename} 文件。")

        # Mirror the CSV contents into SQLite.
        spider.save_to_database(all_games_list[:target_count])
    else:
        print("未爬取到任何游戏数据。")