import asyncio

from bs4 import BeautifulSoup
from loguru import logger

from com.arcfox.base.base_spider import BaseSpider, ExecuteType
from com.arcfox.middleware.async_redis_middleware import open_redis
from com.arcfox.util import async_request as requests
from com.arcfox.yiqing.parser.wjw_parser import WjwParser
from com.arcfox.yiqing.process.baidu_yiqing_processer import BaiduYiqingProceser
from com.arcfox.yiqing.task.yiqing_task import YiQingTask


class ShanghaiYiQingSpider(BaseSpider):
    """Spider for Shanghai epidemic-risk announcements.

    Flow: pull the newest announcement URL from the Shanghai Municipal
    Health Commission news list, extract risk-location names from the
    article body, geocode each name via Baidu's NCP POI search, and
    persist the resulting points through the processer.
    """

    def __init__(self):
        super().__init__()
        self.processer = BaiduYiqingProceser()
        self.task = YiQingTask()
        # News-list index page of the Shanghai Municipal Health Commission.
        self.url_wjw = "https://wsjkw.sh.gov.cn/xwfb/"
        self.wjw_parser = WjwParser()

    async def _pull_task(self):
        # Scan list pages 1-3; ExecuteType.ONCE means a single crawl run.
        return [1, 2, 3], ExecuteType.ONCE

    async def fetch_url(self, task):
        """Return only the newest matching announcement URL.

        :param task: iterable of 1-based page numbers of the news list
        :return: the first (newest) URL found, or None when no page yielded one
        """
        urls = []
        for page in task:
            # Page 1 is "index.html"; subsequent pages are "index_<n>.html".
            if page == 1:
                url = f"{self.url_wjw}index.html"
            else:
                url = f"{self.url_wjw}index_{page}.html"
            response = await requests.get_with_session(url, headers=self.get_default_header())
            if response.code == 200:
                urls += self.wjw_parser.parse_risk_url(response.response)
        # Fix: guard against IndexError when no list page produced a link.
        if not urls:
            logger.warning("fetch_url: no announcement URL found on any list page")
            return None
        logger.info(f"获取到待抓取Url: {urls[0]}")
        return urls[0]

    async def _crawl_by_task(self, task):
        """Crawl one announcement: parse location names and save geocoded points.

        :param task: list of page numbers forwarded to :meth:`fetch_url`
        """
        url = await self.fetch_url(task)
        if url is None:
            # Nothing to crawl this run.
            return
        try:
            response = await requests.get_with_session(url, headers=self.get_default_header())
            soup = BeautifulSoup(response.response, "lxml")
            # The article body is produced by the "135编辑器" WYSIWYG tool;
            # this data-id marks the section that holds the location list.
            editor_tags = soup.find_all("section", {"data-tools": "135编辑器", "data-id": "72469"})
            data_list = []
            for editor_tag in editor_tags:
                span_tags = editor_tag.find_all("span", {"style": "font-size: 16px;"})
                # The last span is a trailing label, not a location — drop it.
                for span in span_tags[:-1]:
                    # Strip non-breaking spaces plus ASCII/CJK punctuation.
                    content = (span.text.replace('\xa0', '').replace(",", "")
                               .replace("。", "").replace("、", "").replace("，", ""))
                    if ':' in content or "：" in content:
                        # Lines containing a colon are headings, not locations.
                        continue
                    data_list.append(content)
            logger.info(data_list)
            for result in data_list:
                point = await self.get_point(result)
                # Fix: geocoding may fail and return None — skip saving then.
                if point is not None:
                    await self.processer.save_sh_yiqing_data(point)
                logger.debug(point)
        except Exception as e:
            # Broad catch is deliberate: one bad announcement must not kill
            # the crawl loop. logger.exception keeps the traceback.
            logger.exception(e)

    def get_headers(self):
        """Return the static request headers for Baidu's NCP POI endpoint."""
        return {
            'Connection': 'keep-alive',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Accept': '*/*',
            'Origin': 'https://ugc.map.baidu.com',
            'Referer': 'https://ugc.map.baidu.com/cube/ncp/homepage?ncpshare=yymap',
            'Cookie': 'BAIDUID=6DBC334C5D3B7ED4D4F34A0C6B42C7EA:FG=1; ugcid=1-655625122161dbba-1649484310%7C898938703'
        }

    async def get_point(self, point_name):
        """Geocode a location name via Baidu's NCP POI search.

        :param point_name: location name to look up (scoped to Shanghai)
        :return: the first POI result dict, or None on failure / no match
        """
        url = "https://ugc.map.baidu.com/cube/ncp/searchpoi"
        query_param = str.encode(f"query={point_name}&region=上海市")
        headers = self.get_headers()
        headers['Content-Length'] = str(len(query_param))
        try:
            response = await requests.post_with_session(url, headers=headers, data=query_param,
                                                        timeout=5)
            result_json = response.json()
            logger.info(result_json)
            # Fix: guard the empty-result case so a successful response with
            # no matches does not raise IndexError (previously swallowed).
            if result_json['code'] == 0 and result_json['result']:
                return result_json['result'][0]
        except Exception as e:
            logger.error(e)
        return None

    @open_redis
    async def init_data_version(self, client):
        # Intentionally a no-op: this spider does not version its data,
        # but the base-class hook requires an implementation.
        pass


if __name__ == "__main__":
    # Entry point: build the spider and drive its crawl loop to completion.
    spider = ShanghaiYiQingSpider()
    asyncio.run(spider.start_crawl())
