# -*- coding: utf-8 -*-
import datetime
import json
import re
import time

import scrapy
from dateutil.parser import parse

from apps.listed_company.listed_company.items import ListedCompanyItem
from loguru import logger
from utils.tools import urlencode, urldecode


class NeeqSpider(scrapy.Spider):
    """Spider for companies listed on the NEEQ (National Equities Exchange
    and Quotations, 全国中小企业股份转让系统).

    Flow: POST the paginated list endpoint (`start_requests` /
    `parse_list`), then fetch a per-company detail endpoint
    (`parse_detail`) to fill in the company's full name before yielding
    the item.
    """

    listed_exchange = '全国中小企业股份转让系统'
    name = 'neeq'
    # Both endpoints return JSONP ("jQuery331_<ts>(<json>)"); compile the
    # unwrapping pattern once instead of on every response.
    JSONP_RE = re.compile(r"jQuery331_\d+\((.*)\)")
    headers = {
        "Accept": "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01",
        "Accept-Language": "zh,zh-TW;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6",
        "Connection": "keep-alive",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Origin": "https://www.neeq.com.cn",
        "Referer": "https://www.neeq.com.cn/nq/listedcompany.html",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest",
        "sec-ch-ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\""
    }
    # NOTE(review): hard-coded session cookies — these presumably expire;
    # confirm whether the endpoint actually requires them.
    cookies = {
        "Hm_lvt_b58fe8237d8d72ce286e1dbd2fc8308c": "1720597512,1720679923",
        "HMACCOUNT": "A8AA8D522A6ABBC0",
        "Hm_lpvt_b58fe8237d8d72ce286e1dbd2fc8308c": "1720682528",
        "C3VK": "3d1f12"
    }

    def start_requests(self):
        """Request page 0 of the company list; pagination fans out in
        ``parse_list`` based on the reported total page count."""
        url = "https://www.neeq.com.cn/nqxxController/nqxxCnzq.do?callback=jQuery331_1720512870327"
        data = {
            "xxfcbj[]": ["0", "1"],
            "xxzrlx[]": "",
            "xxssdq[]": "",
            "xxhyzl[]": "",
            "xxzqdm": "",
            "xxzbqs": "",
            "typejb": "T",
            "neeqhyfl": "1",
            "sortfield": "xxzqdm",
            "sorttype": "asc",
            "page": "0"
        }
        yield scrapy.FormRequest(url, method='POST', formdata=data, callback=self.parse_list, cookies=self.cookies, headers=self.headers, meta={'data': data})

    def parse_list(self, response, **kwargs):
        """Parse one page of the company list.

        Yields one detail request per company; on the first (page-0)
        response only, also yields form requests for every remaining page.
        """
        lines = json.loads(self.JSONP_RE.findall(response.text)[0])[0]
        logger.debug(lines)  # was a bare print(); keep the payload visible at debug level
        for line in lines['content']:
            # 'xxfcbj' flags the market tier: '1' = 创新层, '0' = 基础层.
            if line['xxfcbj'] == '1':
                stock_sector = '创新层'
            elif line['xxfcbj'] == '0':
                stock_sector = '基础层'
            else:
                stock_sector = None
            item = ListedCompanyItem(**{
                'company_full_name': None,  # filled in by parse_detail
                'company_eng_name': None,
                'stock_code': line['xxzqdm'],
                'stock_abb': line['xxzqjc'],
                'expand_stock_abb': None,
                'stock_sector': stock_sector,
                'stock_state': '上市',
                'legal_name': None,
                'board_secretary_name': None,
                'registered_address': None,
                'cor_address': None,
                'zip_code': None,
                'email': None,
                'list_date': parse(line['xxgprq']).date(),
                'total_share_capital': line['xxzgb'] / 10000,  # total share capital (10k shares)
                'limited_share_capital': None,  # restricted tradable shares (10k shares)
                'out_share_capital': None,  # outstanding shares (10k shares)
                'telphone': None,
                'website': None,  # company website
                'listed_exchange': self.listed_exchange,
                'source_url': f"https://www.neeq.com.cn/products/neeq_listed_companies/general_information.html?companyCode={line['xxzqdm']}",  # source URL
                'delisting_date': None,  # delisting date
                'security_type': '三板股票',
                'whet_not_st_company': '是' if 'ST' in line['xxzqjc'] else '否',
                'whet_not_delisting_organize_stocks': '是' if ('退市' in line['xxzqjc'] or line['xxzqjc'].endswith('退')) else '否',
                'province': line['xxssdq'],
                'taxpayer_industry1': None,
                'taxpayer_industry2': line['xxhyzl'].strip(),
                'trade_type': line['xxzrlx'],  # trading method
                'data_newest_query_date': datetime.date.today()  # date this record was fetched
            })
            url = "https://www.neeq.com.cn/nqhqController/detailCompany.do"
            params = {
                "callback": "jQuery331_1720766675672",
                "zqdm": line['xxzqdm'],
                "_": "1720766675673"
            }
            yield scrapy.Request(url + "?" + urlencode(params), callback=self.parse_detail, cookies=self.cookies, headers=self.headers, meta={'item': item})

        page_count = lines['totalPages']
        # Only the page-0 response fans out pagination; follow-up pages
        # carry is_next=False so they never re-enqueue the whole range.
        if response.meta.get("is_next") is not False:
            if page_count > 1:
                data = response.meta.get("data")
                for page_num in range(1, int(page_count)):
                    url = "https://www.neeq.com.cn/nqxxController/nqxxCnzq.do?callback=jQuery331_1720512870327"
                    logger.info(f"{self.listed_exchange} 分页: {page_num + 1} /{page_count}")
                    # Build a fresh dict per request: mutating the shared
                    # `data` in place would leave every scheduled request's
                    # meta['data'] pointing at the same object.
                    page_data = dict(data, page=str(page_num))
                    yield scrapy.FormRequest(url, method='POST', formdata=page_data, callback=self.parse_list, cookies=self.cookies, headers=self.headers, meta={'data': page_data, 'is_next': False})

    def parse_detail(self, response, **kwargs):
        """Complete the item from ``parse_list`` with the company's full
        registered name taken from the detail endpoint, then yield it."""
        item = response.meta.get('item')
        data = json.loads(self.JSONP_RE.findall(response.text)[0])
        item['company_full_name'] = data["baseinfo"]["name"]
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    cmdline.execute("scrapy crawl neeq".split())
