# -*- coding: utf-8 -*-
import datetime
import json
import re
import time

import pandas as pd
import scrapy
from dateutil.parser import parse

from apps.listed_company.listed_company.items import (
    DelistingInformationSourceItem,
    NetListedCompanyIndustryItem,
)
from loguru import logger
from utils.tools import urlencode, urldecode

PATH = r"C:\Yakult\scrapy_spider\apps\listed_company\listed_company\spiders\上市公司行业分类数据采集方案.xlsx"


class NetListedCompanyIndustrySpider(scrapy.Spider):
    """Spider that loads listed-company industry classifications from a local
    Excel workbook (``PATH``) and yields one ``NetListedCompanyIndustryItem``
    per row.

    The data source is the local spreadsheet, not the web; the single HTTP
    request exists only because Scrapy needs at least one request to start
    the crawl loop.
    """

    listed_exchange = "全国中小企业股份转让系统"
    name = "net_listed_company_industry"

    def start_requests(self):
        # Scrapy requires at least one outgoing request to drive the engine;
        # the response content is ignored, so any reachable URL works here.
        url = "https://www.baidu.com/"
        yield scrapy.Request(url, callback=self.parse_list)

    def parse_list(self, response, **kwargs):
        """Read sheet index 1 of the workbook and yield one item per row.

        :param response: ignored — data comes from the local Excel file.
        :yields: NetListedCompanyIndustryItem with stock code (zero-padded
            to 6 digits), stock abbreviation, and level-1/level-2 industry
            classification names.
        """
        df = pd.read_excel(PATH, sheet_name=1, dtype="str")
        # Even with dtype="str", empty Excel cells come back as float NaN,
        # which would crash str methods below — normalize them to "" first.
        df = df.fillna("")
        # Stock codes lose leading zeros in Excel; left-pad back to 6 digits.
        df["股票代码"] = df["股票代码"].apply(lambda x: x.zfill(6))
        for row in df.to_dict("records"):
            yield NetListedCompanyIndustryItem(
                **{
                    "stock_code": row["股票代码"],
                    "stock_abb": row["股票简称"],
                    "industry1": row["一级行业分类名称"],
                    "industry2": row["二级行业分类名称"],
                }
            )


if __name__ == "__main__":
    from scrapy import cmdline

    # Convenience entry point: run this spider directly with `python <file>`.
    cmdline.execute(["scrapy", "crawl", "net_listed_company_industry"])
