import scrapy
import re
import time
import execjs
import json
import requests
from itemadapter import ItemAdapter


class QuoteSpider(scrapy.Spider):
    """Crawl A-share quote tables from eastmoney.com.

    Flow:
      1. ``start_requests``   -> resolve the ``fs`` board filters from gridlist3.js
      2. ``parse_gridlist``   -> extract the ``ut`` token / field list, request page 1 per board
      3. ``parse_first_page`` -> read total row count, yield page-1 items, schedule pages 2..N
      4. ``parse_data``       -> unwrap the JSONP payload and yield one dict per stock
    """

    name = "quote"
    allowed_domains = ['eastmoney.com', 'push2.eastmoney.com']
    custom_settings = {
        'DOWNLOAD_DELAY': 1,  # 1-second download delay between requests
    }

    # Shared request headers. Deliberately no explicit "host" header: the HTTP
    # client derives Host from the URL, which fixes the original mismatch of
    # sending "37.push2..." as Host while requesting "36.push2...".
    DEFAULT_HEADERS = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate",
        "accept-language": "zh-CN,zh;q=0.9",
        "connection": "keep-alive",
        "referer": "http://quote.eastmoney.com/center/gridlist.html?st=ChangePercent&sr=-1",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
    }

    # Quote-list API endpoint (the numeric subdomain is a load-balancer shard).
    API_URL = "https://36.push2.eastmoney.com/api/qt/clist/get"

    def start_requests(self):
        """Kick off the crawl: resolve ``fs`` filters, then fetch gridlist3.js."""
        fs_list = self.get_type_data()
        if not fs_list:
            self.logger.error("未能获取到有效的 fs 参数")
            return

        # gridlist3.js also contains the ut/fields tokens needed by the API.
        yield scrapy.Request(
            url='http://quote.eastmoney.com/center/js/gridlist3.js',
            headers=self.DEFAULT_HEADERS,
            callback=self.parse_gridlist,
            cb_kwargs=dict(fs_list=fs_list),
        )

    def parse_gridlist(self, response, fs_list):
        """Extract the ``ut`` token and field list from gridlist3.js and fire
        one page-1 request per ``fs`` board filter.

        :param response: the gridlist3.js response body (JavaScript source)
        :param fs_list: board filter strings discovered by ``get_type_data``
        """
        ut = re.compile('ut: "(.*?)"').findall(response.text)
        fields = re.compile('fields: "(.*?)"').findall(response.text)

        if not ut or not fields:
            self.logger.error("获取失败")
            return

        # The stock table uses the longest fields string (> 110 chars); the
        # shorter matches belong to other page widgets.
        field = next((i for i in fields if len(i) > 110), None)
        if not field:
            self.logger.error("字段解析失败")
            return

        params_template = {
            "cb": self.get_cb(),        # JSONP callback name
            "pn": 1,                    # page number
            "pz": 20,                   # page size
            "po": 1,
            "np": 1,
            "ut": ut[0],                # API access token scraped above
            "fltt": 2,
            "invt": 2,
            "dect": 1,
            "wbp2u": self.get_wbp2u(),  # anti-bot fingerprint parameter
            "fid": "f3",                # sort by change percent
            "fields": field,
            "_": int(time.time()),      # cache-buster timestamp
        }

        for fs in fs_list:
            params = dict(params_template, fs=fs, pn=1)
            yield scrapy.Request(
                url=f"{self.API_URL}?{self.build_query_string(params)}",
                method='GET',
                headers=self.DEFAULT_HEADERS,
                callback=self.parse_first_page,
                cb_kwargs=dict(params=params),
            )

    def parse_first_page(self, response, params):
        """Read the board's total row count from page 1, yield page-1 items,
        then schedule pages 2..N.

        Fixes the original bug where page 1 was re-requested with an URL
        identical to the request that produced this response: the dupefilter
        dropped it, so page-1 rows were silently lost. The in-hand response
        is now parsed directly and only the remaining pages are scheduled.
        """
        if response.status != 200:
            self.logger.error(f"获取失败: {response.status}")
            return

        try:
            data = json.loads(self._unwrap_jsonp(response.text))
            total = data['data']['total']
        except Exception as e:
            self.logger.error(f"解析数据时出错: {str(e)}")
            return

        # Page 1 is already in hand -- parse it instead of re-fetching it.
        yield from self.parse_data(response)

        # Ceiling division: total rows / page size.
        pages = (total + params["pz"] - 1) // params["pz"]
        for pn in range(2, pages + 1):
            page_params = dict(params, pn=pn)
            yield scrapy.Request(
                url=f"{self.API_URL}?{self.build_query_string(page_params)}",
                method='GET',
                headers=self.DEFAULT_HEADERS,
                callback=self.parse_data,
            )

    def parse_data(self, response):
        """Unwrap one JSONP page and yield one dict per stock row."""
        if response.status != 200:
            self.logger.error(f"获取失败: {response.status}")
            return

        try:
            data = json.loads(self._unwrap_jsonp(response.text))
            diff_data = data['data']['diff']
        except Exception as e:
            self.logger.error(f"解析数据时出错: {str(e)}")
            return

        for index, item in enumerate(diff_data, start=1):
            # Per-row try/except so one malformed row no longer aborts the
            # whole page (the original single try block did).
            try:
                f6_value = float(item['f6'])
                stock_info = {
                    "index": index,  # row number within this page (resets per page)
                    "code": item['f12'],  # stock code
                    "name": item['f14'],  # stock name
                    "related_links": "股吧 资金流 数据",  # static link placeholder text
                    "latest_price": item['f2'],  # latest price
                    "change_percentage": f"{item['f3']}%",  # change percent
                    "change_amount": item['f4'],  # change amount
                    "volume_hands": item['f5'],  # volume (lots)
                    # NOTE(review): divides by 1e4 (万) but the label reads 亿
                    # (1e8) -- confirm the intended unit; behavior kept as-is.
                    "turnover_yuan": f"{f6_value / 10000:.2f}亿",
                    "amplitude": f"{item['f7']}%",  # amplitude
                    "high": item['f8'],  # day high
                    "low": item['f9'],  # day low
                    "open_price": item['f10'],  # open
                    "previous_close": item['f11'],  # previous close
                    "turnover_rate": f"{item['f23']}%",  # turnover rate
                    "pe_ratio_dynamic": item['f24'],  # dynamic P/E ratio
                    "pb_ratio": item['f25'],  # P/B ratio
                }
                yield stock_info
            except Exception as e:
                self.logger.error(f"解析数据时出错: {str(e)}")

    @staticmethod
    def _unwrap_jsonp(text):
        """Strip the ``callback( ... );`` JSONP wrapper and return the JSON body.

        Splits on the FIRST "(" only, so parentheses occurring inside the JSON
        payload are preserved (the original ``split("(")[1]`` truncated the
        payload at the second parenthesis).
        """
        return text.strip().split("(", 1)[1].rstrip(");")

    def get_cb(self):
        """Return a jQuery-style JSONP callback name, e.g. ``jQuery1123<digits>``."""
        js_code = """
            function get_cb(){
                return "jQuery" + ("1.12.3" + Math.random()).replace(/\\D/g, "");
            }
            return get_cb();
        """
        ctx = execjs.compile(js_code)
        return ctx.call("get_cb")

    def get_wbp2u(self):
        """Build the ``wbp2u`` anti-bot parameter from a hard-coded cookie string.

        With the baked-in cookies (no ``uidal``, no ``mtp``) this always
        evaluates to ``"|0|0|0|web"`` -- kept as JS to mirror the site's logic.
        """
        js_code = """
            function get_wbp2u(){
                const cookieString = "qgqp_b_id=e5c1215b302e908f9e9596bc1598ab11; websitepoptg_api_time=1733498615406; rskey=LAoksaTBnVnVzZ204UnJoVXlkS3ZTL0t5dz09zNzUM; isoutside=0; st_si=90576546761010; st_asi=delete; st_sn=18; st_psi=20241207165207945-113200301321-4029504020; st_pvi=13044721870445; st_sp=2024-12-03%2020%3A18%3A01; st_inirUrl=https%3A%2F%2Fwww.baidu.com%2Flink";
                const parsedCookies = {};
                cookieString.split('; ').forEach(pair => {
                    const [name, value] = pair.split('=');
                    parsedCookies[name] = value;
                });

                var delayparams = 'UID|0|MOBILE|0|web';

                if (parsedCookies['uidal']) {
                    delayparams = delayparams.replace('UID', parsedCookies['uidal'].substring(0, 16));
                } else {
                    delayparams = delayparams.replace('UID', '');
                }

                if (parsedCookies['mtp'] != null) {
                    if (parsedCookies['mtp'] == '0') {
                        delayparams = delayparams.replace('MOBILE', '3');
                    } else {
                        delayparams = delayparams.replace('MOBILE', parsedCookies['mtp']);
                    }
                } else {
                    delayparams = delayparams.replace('MOBILE', '0');
                }

                return delayparams;
            }
            return get_wbp2u();
        """
        ctx = execjs.compile(js_code)
        return ctx.call("get_wbp2u")

    def get_type_data(self):
        """Fetch gridlist3.js synchronously and extract the ``fs`` board filters.

        Runs before the Scrapy engine schedules anything, hence plain
        ``requests``. Returns a (possibly empty) list of filter strings.
        """
        url = "https://quote.eastmoney.com/center/js/gridlist3.js"

        headers = {
            'host': 'quote.eastmoney.com',
            'referer': 'https://quote.eastmoney.com/center/gridlist.html?st=ChangePercent&sr=-1',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }
        fs = []
        try:
            # Timeout added so a hung connection cannot stall spider startup;
            # requests.Timeout subclasses RequestException and is caught below.
            response = requests.get(url, headers=headers, timeout=10)
            if response.status_code == 200:
                fs = re.compile(r'new_bankuai\.Bankuai\("#table_wrapper",\s*"([^"]*)"').findall(response.text)
            else:
                self.logger.error("获取失败")
        except requests.RequestException as e:
            self.logger.error(f"请求过程中出现错误: {e}")
        return fs

    def build_query_string(self, params):
        """Join params into a query string WITHOUT percent-encoding.

        NOTE(review): values (e.g. ``fs`` filters) are sent verbatim; the API
        appears to accept the raw characters -- confirm before switching to
        ``urllib.parse.urlencode``, which would re-encode ``:`` and ``+``.
        """
        return '&'.join([f"{k}={v}" for k, v in params.items()])