import json
import math

import scrapy
from scrapy import Request
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

from app import evn_
from spiders.items import StockInfoItem
from utils.util import StockTool


class StockInfoSpider(scrapy.Spider):
    """Crawl per-industry stock listings from eastmoney's "clist" JSONP API.

    For every industry (board) object in ``industry_list`` the spider fetches
    page 1 of that board's stock list.  When that single page already contains
    the whole board (``len(diff) == total``) the items are emitted directly;
    otherwise one request per page is issued and handled by ``parse_total``.

    Example API URL produced:
    https://push2.eastmoney.com/api/qt/clist/get?cb=...&fid=f62&po=1&pz=50&pn=1&np=1&fltt=2&invt=2&ut=...&fs=b:BK0482&fields=f12,f14
    """

    name = 'stock_info'
    industry_list = []
    # {0} = market prefix from StockTool.code2Market, {1} = 6-digit stock code.
    stock_detail_url = 'http://quote.eastmoney.com/concept/{0}{1}.html'

    # Results per page. MUST stay in sync with the 'pz=50' query parameter
    # below; used to compute the page count when one page is not enough.
    PAGE_SIZE = 50

    url = 'https://push2.eastmoney.com/api/qt/clist/get?'
    param = 'cb=jQuery1123015544789836278805_1639983478245&' \
            'fid=f62&' \
            'po=1&' \
            'pz=50&' \
            'pn={pageNo}&' \
            'np=1&' \
            'fltt=2&' \
            'invt=2&' \
            'ut=b2884a393a59ad64002292a3e90d46a5&' \
            'fs=b:{code}&' \
            'fields=f12,f14'
    url_param = url + param

    def __init__(self, industry_list, *args, **kwargs):
        """Store the industry list and start a headless Chrome driver.

        :param industry_list: iterable of objects exposing ``code`` and
            ``name`` attributes (board code / board display name).
        """
        # Required by Scrapy: the base class wires up name, custom kwargs, etc.
        super().__init__(*args, **kwargs)
        self.industry_list = industry_list

        chrome_options = Options()
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('lang=zh_CN.UTF-8')
        chrome_options.add_argument('headless')  # headless browser
        chrome_options.add_argument('--no-sandbox')  # required in containers
        chrome_options.add_argument('--disable-dev-shm-usage')  # required in containers
        prefs = {
            "profile.managed_default_content_settings.images": 2,  # do not load images
            'permissions.default.stylesheet': 2,  # do not load CSS
        }
        chrome_options.add_experimental_option("prefs", prefs)
        # NOTE(review): 'executable_path' / 'chrome_options' kwargs are the
        # Selenium 3 spelling (Selenium 4 uses Service/options) — kept as-is
        # to match whatever version the project pins; confirm before upgrading.
        self.bro = webdriver.Chrome(executable_path=evn_.CHROMEDRIVER_PATH, chrome_options=chrome_options)

    def start_requests(self):
        """Replace ``start_urls``: request page 1 of each board's stock list.

        Boards come from ``self.industry_list`` (read from the database by the
        caller); the board name/code travel in the request ``meta`` so the
        parse callbacks can attach them to each item.
        """
        for industry_info in self.industry_list:
            if not industry_info:
                continue
            code = getattr(industry_info, 'code')
            if not code:
                continue
            start_url = self.url_param.format(pageNo=1, code=code)
            meta = {'name': getattr(industry_info, "name"), "code": code}
            yield Request(start_url, callback=self.parse_item, meta=meta)

    @staticmethod
    def _extract_payload(text):
        """Strip the JSONP wrapper ``jQuery...({...});`` and return the parsed
        ``data`` object.  Raises KeyError when the response has no ``data``."""
        start_index = text.find('(')
        end_index = text.rfind(')')
        return json.loads(text[start_index + 1:end_index])['data']

    def _build_items(self, industry_name, stocks):
        """Yield a ``StockInfoItem`` per raw stock dict.

        Skips special-treatment (ST), delisted (退市), B-share and
        STAR-market (code prefix '68') stocks.

        :param industry_name: board display name attached to every item.
        :param stocks: list of dicts with 'f12' (code) and 'f14' (name).
        """
        for stock in stocks:
            stock_code = stock['f12']
            stock_name = stock['f14']
            if "ST" in stock_name or '退市' in stock_name or 'B' in stock_name:
                continue
            if stock_code.startswith('68'):
                continue

            item = StockInfoItem()
            item['industry_name'] = industry_name
            item['stock_code'] = stock_code
            item['stock_name'] = stock_name
            item['market'] = StockTool.code2Market(stock_code)
            item['stock_detail_url'] = self.stock_detail_url.format(item['market'], stock_code)
            yield item

    def parse_item(self, response):
        """Handle page 1 of a board.

        If page 1 holds the entire board, emit its items directly; otherwise
        fan out one request per page (page 1 included — nothing was emitted
        yet) to ``parse_total``.
        """
        industry_name = response.meta["name"]
        industry_code = response.meta["code"]

        data_ = self._extract_payload(response.text)
        stocks = data_['diff']
        total = data_['total']

        if len(stocks) == total:
            yield from self._build_items(industry_name, stocks)
        else:
            pages = math.ceil(total / self.PAGE_SIZE)
            for page_no in range(1, pages + 1):
                page_url = self.url_param.format(pageNo=page_no, code=industry_code)
                # dont_filter: the page-1 URL was already fetched once above.
                yield Request(url=page_url, meta={"name": industry_name},
                              callback=self.parse_total, dont_filter=True)

    def parse_total(self, response):
        """Handle one page of a multi-page board: emit its stock items."""
        industry_name = response.meta["name"]
        data_ = self._extract_payload(response.text)
        yield from self._build_items(industry_name, data_['diff'])
