import json
import math
import time
import urllib.parse
from typing import Iterable

import scrapy
from scrapy import Request

from eastmoney.items import RateItem, NavItem


class FundSpider(scrapy.Spider):
    """Crawl Eastmoney funds: the full fund list, then each fund's
    purchase fee rate and its most recent ~30 days of NAV history.

    Request flow:
        start_requests  -> POST "fundCount"   -> parse_all_fund
        parse_all_fund  -> POST "fundRange"   -> parse_page_fund (one per 20-fund page)
        parse_page_fund -> POST trade info    -> parse_rate      (yields RateItem)
                        -> GET  NAV history   -> parse_nav       (yields NavItem)
    """

    name = 'FundSpider'
    # Wealth-management gateway; serves both the fund list and the fee-rate lookup.
    url = 'https://dcmswg.eastmoneysec.com/wealthmangeservice'
    # Data-center endpoint for per-fund NAV history.
    # NOTE(review): the double slash before "securities" is preserved verbatim —
    # the server appears to accept it; confirm before normalizing.
    nav_base_url = 'https://datacenter.eastmoney.com//securities/api/data/get'

    # Template POST payload for fund-list queries ("fundCount"/"fundRange").
    # `method` and `args.pageNo` are rewritten per request before serialization.
    params = {
        "args": {
            "orgCodes": [],
            "exclude": 1,
            "openStates": [1],
            "applyRates": [],
            "riskLevels": [],
            "fundScales": [],
            "fundRatings": [],
            "foundYears": [],
            "requestType": 1,
            "fundType": 1,
            "orderField": "change7days",
            "orderFlag": "desc",
            "pageNo": 1,
            "pageSize": 20
        },
        "method": "fundRange",
        "appKey": "com.eastmoney.iphone",
        "deviceId": "A655A63C-726B-43CC-807A-00B1E83D5D80",
        "client": "ios",
        "clientVersion": "10.23",
        "clientType": "cfw",
        "randomCode": "FN5NWyC7wKXNrAha",
        "reserve": "",
        "timestamp": 1726738244348,
        "sign": "71fefc94e859c3a9b13fc530e1172648"
    }

    # Template query string for the NAV-history GET.
    # `filter` and `ps` are rewritten per fund before urlencoding.
    params_nav = {
        "type": "RPT_F10_FUND_PERNAV",
        "sty": "SECURITY_CODE,SECURITY_INNER_CODE,SECUCODE,SECURITY_NAME,SECURITY_NAME_ABBR,"
               "END_DATE,PER_NAV,PER_ACCUM_NAV,ADJNAV_GR,INCOME_TEN_THOUSAND,YIELD_7DAYS,THOUSAND_TYPE,"
               "FUND_TYPE,FUND_TYPE_CODE,SPECIAL_MARK,IS_LARGE_REDEEM",
        "source": "HSF10",
        "client": "APP",
        "p": 1,
        "ps": 40,
        "sr": -1,
        "st": "END_DATE",
        "filter": '(SECUCODE="007424.OF")'
    }

    # Template POST payload for the fee-rate ("fundPageTradeInfo") lookup.
    # `args.fundCode` and `timestamp` are rewritten per request; the class-level
    # value here is only a placeholder.  The API's other timestamp (in `params`)
    # is an integer in milliseconds, so we use the same unit.
    params_trade_info = {
        "appKey": "cfw",
        "clientVersion": "10.23",
        "client": "ios",
        "method": "fundPageTradeInfo",
        "randomCode": "57AB5516-0E4A-47",
        "clientType": "cfw",
        "reserve": "",
        "timestamp": int(time.time() * 1000),  # refreshed per request in parse_page_fund
        "args": {
            "token": "cqfiil4bpd8rbpnvhdsn",
            "fundCode": "017436",
            "requestCode": "com.eastmoney.iphone"
        }
    }

    # Shared headers for every POST to the gateway.  Content-Type is declared
    # here so both the "fundCount" and "fundRange" requests announce JSON
    # bodies consistently (previously only "fundRange" did).
    headers = {
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Connection': 'keep-alive',
        'Content-Type': 'application/json; charset=utf-8',
        'Host': 'dcmswg.eastmoneysec.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/117.0',
    }

    def start_requests(self) -> Iterable[Request]:
        """Kick off the crawl by asking the gateway for the total fund count."""
        self.params["method"] = "fundCount"
        yield scrapy.Request(url=self.url, method='POST', headers=self.headers,
                             body=json.dumps(self.params),
                             callback=self.parse_all_fund)

    def parse_all_fund(self, response):
        """Fan out one "fundRange" request per page of 20 funds.

        Mutating the shared ``self.params`` template is safe here because the
        body is serialized with ``json.dumps`` in the same loop iteration,
        before the next mutation.
        """
        total = json.loads(response.text)["data"]["total"]
        page_count = math.ceil(total / 20)

        # Invariant across pages — set once, outside the loop.
        self.params['method'] = 'fundRange'
        for page in range(1, page_count + 1):
            self.params['args']['pageNo'] = page
            yield scrapy.Request(url=self.url, method='POST', headers=self.headers,
                                 body=json.dumps(self.params),
                                 callback=self.parse_page_fund)

    def parse_page_fund(self, response):
        """For each fund on one 20-fund page, request its fee-rate details
        (POST to the gateway) and its recent NAV history (GET to the data
        center)."""
        fund_list = json.loads(response.text)["data"]["items"]
        for fund in fund_list:
            name = fund['securityName']
            code = fund['securityCode']

            # Point the shared trade-info template at this fund and refresh
            # the millisecond timestamp; the body is serialized immediately,
            # so reusing the template dict across iterations is safe.
            self.params_trade_info["args"]["fundCode"] = code
            self.params_trade_info["timestamp"] = int(time.time() * 1000)
            yield scrapy.Request(url=self.url, method="POST",
                                 meta={'name': name, 'code': code},
                                 body=json.dumps(self.params_trade_info),
                                 callback=self.parse_rate)

            # Same pattern for NAV: target this fund's SECUCODE and cap the
            # result set to the most recent 30 NAV records.
            self.params_nav['filter'] = f'(SECUCODE="{code}.OF")'
            self.params_nav["ps"] = 30
            query_string = urllib.parse.urlencode(self.params_nav)
            nav_url = f'{self.nav_base_url}?{query_string}'
            yield scrapy.Request(url=nav_url, method='GET',
                                 meta={'name': name, 'code': code},
                                 callback=self.parse_nav)

    def parse_rate(self, response):
        """Parse one fund's fee-rate response into a RateItem."""
        rate_item = RateItem()
        resp = json.loads(response.text)['data']
        rate_item['fund_code'] = response.meta['code']  # fund code
        rate_item['fund_name'] = response.meta['name']  # fund name
        rate_item['discount'] = resp['discount']        # discounted fee rate
        rate_item['taxRate'] = resp['taxRate']          # original fee rate
        yield rate_item

    def parse_nav(self, response):
        """Yield one NavItem per NAV record (most recent ~30 days).

        Full NAV histories run to 1k+ records across 10k+ funds, which is
        far too slow to crawl; ``ps=30`` in the request limits us to the
        last 30 days.

        A fresh NavItem is created for every record — the previous version
        reused and mutated a single item, so every yielded item was the
        same object (and the last record's values overwrote all others for
        any consumer that kept references).
        """
        records = json.loads(response.text)['result']['data']
        for el in records:
            nav_item = NavItem()
            nav_item['fund_code'] = response.meta['code']  # fund code
            nav_item['fund_name'] = response.meta['name']  # fund name
            nav_item['date'] = el['END_DATE']
            nav_item['per_nav'] = el['PER_NAV']            # unit NAV
            nav_item['per_accum_nav'] = el['PER_ACCUM_NAV']  # accumulated NAV
            yield nav_item