# -*- coding: utf-8 -*-

import scrapy
from scrapy.selector import Selector

import logging
from copy import copy, deepcopy
import json
from HifoFzz.items import LandChinaItem, LandChinaGivingItem
import datetime

logger = logging.getLogger(__name__)


class LandchinaSpider(scrapy.Spider):
    """Spider for landchina.com's JSON POST APIs.

    Crawls two notice categories in parallel, filtered to region code "50"
    over 2017-01-01 .. 2021-12-31:

    * supply results ("供地结果", ``meta['type'] == 2``):
      result list -> :meth:`parse` -> :meth:`parse_result_detail` -> ``LandChinaItem``
    * transfer notices ("出让公告", ``meta['type'] == 1``):
      giving list -> :meth:`parse` -> :meth:`parse_giving_detail` -> ``LandChinaGivingItem``

    Every endpoint expects a POST with a JSON body and a per-endpoint
    "Hash" request header (presumably an anti-scraping signature bound to
    the endpoint -- TODO confirm how it is derived).
    """

    name = 'landchina'
    allowed_domains = ['landchina.com']
    start_urls = ['https://www.landchina.com/']
    giving_notice_list_url = 'https://api.landchina.com/tGygg/transfer/list'  # POST: transfer-notice list
    giving_notice_detail_url = 'https://api.landchina.com/tGygg/transfer/detail'  # POST: transfer-notice detail
    result_notice_list_url = 'https://api.landchina.com/tGdxm/result/list'  # POST: supply-result list
    result_notice_detail_url = 'https://api.landchina.com/tGdxm/result/detail'  # POST: supply-result detail

    custom_settings = {
        # The default "Hash" matches the supply-result LIST endpoint; every
        # other endpoint overrides it with its own per-request headers.
        'DEFAULT_REQUEST_HEADERS': {
            "Connection": "keep-alive",
            "Accept": "application/json, text/plain, */*",
            "Content-Type": "application/json",
            "Hash": "beb0d30fb7c6535cc3221b93beac18fb424c9015ecf5cf4befe6316f8043ccd9",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36",
            "Origin": "https://landchina.com",
            "Referer": "https://landchina.com/",
            "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        },
        'DOWNLOAD_DELAY': 3,
        'ITEM_PIPELINES': {
            'HifoFzz.pipelines.RedisConnPipeline': 299,  # redis connection; to use redis-backed proxies also enable XXXProxyMiddleware
            'HifoFzz.pipelines.LandChinaPipeline': 300,  # landchina
            'HifoFzz.pipelines.MongoClientPipeline': 399,
        }
    }

    def start_requests(self):
        """Schedule page 1 of both list endpoints."""
        # TODO: the first list page can be served an anti-scraping response
        # that is not yet detected reliably; needs handling.
        first_page = 1
        page_size = 40
        # Page 1 of the supply-result list. No explicit headers: the
        # default headers already carry this endpoint's Hash.
        result_params_dict = {"pageNum": first_page, "pageSize": page_size, "xzqDm": "50", "gyFs": "23",
                              "startDate": "2017-01-01 00:00:00",
                              "endDate": "2021-12-31 23:59:59", }
        yield scrapy.Request(
            url=self.result_notice_list_url,
            method='POST',
            meta=dict(page_num=first_page, page_size=page_size, type=2),
            body=json.dumps(result_params_dict),
        )
        # Page 1 of the transfer-notice list; needs this endpoint's Hash.
        giving_params_dict = {"pageNum": first_page, "pageSize": page_size, "xzqDm": "50", "ggLx": "23",
                              "startDate": "2017-01-01 00:00:00", "endDate": "2021-12-31 23:59:59"}
        giving_headers = {
            "Connection": "keep-alive",
            "Accept": "application/json, text/plain, */*",
            "Content-Type": "application/json",
            "Hash": "b25e3ee4c7926292049c56874ded71c8c5c36cc5dd3756879d6be2a71b858962",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36",
            "Origin": "https://landchina.com",
            "Referer": "https://landchina.com/",
            "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        }
        yield scrapy.Request(
            url=self.giving_notice_list_url,
            method='POST',
            headers=giving_headers,
            meta=dict(page_num=first_page, page_size=page_size, type=1),
            body=json.dumps(giving_params_dict),
        )

    def parse(self, response, **kwargs):
        """Parse one list page of either category.

        Yields a detail request per row plus one request for the next list
        page. ``response.meta`` carries ``type`` (1 = transfer notice,
        2 = supply result), ``page_num`` and ``page_size``.
        """
        # NOTE: ints are immutable; the original copy() calls were no-ops.
        # Renamed so the builtin `type` is not shadowed.
        notice_type = response.meta['type']
        category = '供地结果' if notice_type == 2 else '出让公告'
        page_num = response.meta['page_num']
        page_size = response.meta['page_size']
        try:
            resp_dict = json.loads(response.body.decode())
            data_dict = resp_dict.get('data', None)
            assert data_dict, f'{category}-第{page_num}页-列表页数据为空'
            total_page = data_dict['pages']
            current_page = data_dict['pageNum']
            next_page = data_dict['nextPage']
            data_list = data_dict.get('list', list())
            # Anti-scraping heuristic: the server must echo back the page
            # we asked for (or we must already be on the last page).
            assert ((page_num == current_page and next_page == page_num + 1) or (
                    page_num == total_page)), f'{category}-第{page_num}页-页面被反爬'
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error(f'{category}-第{page_num}页-列表页数据提取出错，error:{e}')
        else:
            # Supply results
            if notice_type == 2:
                result_headers = {
                    "Connection": "keep-alive",
                    "Accept": "application/json, text/plain, */*",
                    "Content-Type": "application/json",
                    "Hash": "d565e13d67a7e0a2e16b0cae9282fb0e6c52e8d0e754f34f701a28f3dad3fc96",
                    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36",
                    "Origin": "https://www.landchina.com",
                    "Sec-Fetch-Site": "same-site",
                    "Sec-Fetch-Mode": "cors",
                    "Sec-Fetch-Dest": "empty",
                    "Referer": "https://www.landchina.com/",
                    "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
                }
                for data in data_list:
                    item_result = LandChinaItem()
                    item_result['category'] = category
                    item_result['gdGuid'] = data['gdGuid']
                    item_result['xzqDm'] = data['xzqDm']
                    item_result['tdZl'] = data['tdZl']
                    item_result['gyFs'] = data['gyFs']
                    item_result['gyMj'] = data['gyMj']
                    item_result['tdYt'] = data['tdYt']
                    item_result['qdRq'] = data['qdRq']
                    item_result['xzqFullName'] = data['xzqFullName']
                    # POST for the supply-result detail page.
                    if item_result['gdGuid']:
                        yield scrapy.Request(
                            url=self.result_notice_detail_url,
                            method='POST',
                            headers=result_headers,
                            body=json.dumps({"gdGuid": item_result['gdGuid']}),
                            meta=dict(item_result=deepcopy(item_result)),
                            callback=self.parse_result_detail,
                        )

                # Pagination. Only 'referer' is overridden; the default
                # headers already carry this endpoint's Hash.
                if page_num < total_page:
                    result_params_dict = {"pageNum": next_page, "pageSize": page_size, "xzqDm": "50", "gyFs": "23",
                                          "startDate": "2017-01-01 00:00:00",
                                          "endDate": "2021-12-31 23:59:59", }
                    yield scrapy.Request(
                        url=self.result_notice_list_url,
                        method='POST',
                        headers={'referer': 'https://landchina.com/'},
                        meta=dict(page_num=next_page, page_size=page_size, type=2),
                        body=json.dumps(result_params_dict),
                        callback=self.parse,
                    )
            # Transfer notices
            if notice_type == 1:
                giving_headers = {
                    "Connection": "keep-alive",
                    "Accept": "application/json, text/plain, */*",
                    "Content-Type": "application/json",
                    "Hash": "0638515935e014ee554f4e4bd272298c952af2f27c6a17747a54c905a83e6e8b",
                    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36",
                    "Origin": "https://landchina.com",
                    "Referer": "https://landchina.com/",
                    "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
                }
                for data in data_list:
                    item_giving = LandChinaGivingItem()
                    item_giving['category'] = category
                    item_giving['fbSj'] = data['fbSj']
                    item_giving['ggLx'] = data['ggLx']
                    item_giving['xzqDm'] = data['xzqDm']
                    item_giving['xzqFullName'] = data['xzqFullName']
                    item_giving['gyggGuid'] = data['gyggGuid']
                    item_giving['gyggBt'] = data['gyggBt']
                    # POST for the transfer-notice detail page.
                    if item_giving['gyggGuid']:
                        yield scrapy.Request(
                            url=self.giving_notice_detail_url,
                            method='POST',
                            headers=giving_headers,
                            body=json.dumps({"gyggGuid": item_giving['gyggGuid']}),
                            meta=dict(item_giving=deepcopy(item_giving)),
                            callback=self.parse_giving_detail,
                        )
                # Pagination: same headers as the detail request except for
                # the LIST endpoint's Hash.
                if page_num < total_page:
                    giving_params_dict = {"pageNum": next_page, "pageSize": page_size, "xzqDm": "50", "ggLx": "23",
                                          "startDate": "2017-01-01 00:00:00", "endDate": "2021-12-31 23:59:59"}
                    giving_list_headers = deepcopy(giving_headers)
                    giving_list_headers['Hash'] = "b25e3ee4c7926292049c56874ded71c8c5c36cc5dd3756879d6be2a71b858962"
                    yield scrapy.Request(
                        url=self.giving_notice_list_url,
                        method='POST',
                        headers=giving_list_headers,
                        meta=dict(page_num=next_page, page_size=page_size, type=1),
                        body=json.dumps(giving_params_dict),
                        callback=self.parse,
                    )

    def parse_result_detail(self, response):
        """Parse a supply-result detail response and yield the completed
        ``LandChinaItem``.

        Extraction failures are logged, not raised, so one bad detail page
        never aborts the crawl.
        """
        item_result = copy(response.meta['item_result'])
        try:
            resp_dict = json.loads(response.body.decode())
            data_dict = resp_dict.get('data', None)
            # A missing/empty 'relate' raises IndexError here and is
            # reported by the generic handler below.
            relate_dict = resp_dict.get('relate', list())[0]
            assert data_dict and relate_dict, \
                f'{item_result["category"]}-【{item_result["tdZl"]}/{item_result["gdGuid"]}】-详情页数据为空'
            # Anti-scraping check: the detail must belong to the requested id.
            gdGuid = data_dict['gdGuid']
            assert gdGuid == item_result["gdGuid"], \
                f'{item_result["category"]}-【{item_result["tdZl"]}/{item_result["gdGuid"]}】-详情页被反爬'
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error(
                f'{item_result["category"]}-【{item_result["tdZl"]}/{item_result["gdGuid"]}】-详情页数据提取出错，error:{e}')
        else:
            item_result['srr'] = data_dict.get('srr', None)  # land-use right holder
            item_result['xmMc'] = data_dict.get('xmMc', None)  # project name
            item_result['crNx'] = data_dict.get('crNx', None)  # land-use term (years)
            item_result['jzMj'] = data_dict.get('jzMj', None)  # building area
            item_result['minRjl'] = data_dict.get('minRjl', None)  # plot-ratio lower bound
            item_result['maxRjl'] = data_dict.get('maxRjl', None)  # plot-ratio upper bound
            item_result['tdJb'] = data_dict.get('tdJb', None)  # land grade
            item_result['dzBaBh'] = data_dict.get('dzBaBh', None)  # electronic supervision number
            item_result['jgSj'] = self.transfer_date(data_dict.get('jgSj', None))  # agreed completion date
            item_result['jdSj'] = self.transfer_date(data_dict.get('jdSj', None))  # agreed hand-over date
            item_result['sjJdSj'] = self.transfer_date(data_dict.get('sjJdSj', None))  # actual hand-over date
            item_result['dgSj'] = self.transfer_date(data_dict.get('dgSj', None))  # agreed construction-start date
            # Keep the list-page signing date when the detail omits it.
            qdRq = self.transfer_date(data_dict.get('qdRq', None))
            item_result['qdRq'] = qdRq if qdRq else item_result['qdRq']  # signing date
            item_result['pzRq'] = self.transfer_date(data_dict.get('pzRq', None))  # approval date
            item_result['zdBh'] = data_dict.get('zdBh', None)  # parcel number
            item_result['districtName'] = data_dict.get('area', None)  # district
            item_result['cityName'] = '重庆市'  # city
            item_result['crBzj'] = relate_dict.get('crBzj', None)  # deposit
            item_result['qsj'] = relate_dict.get('qsj', None)  # starting price
            item_result['cjJg'] = relate_dict.get('cjJg', None)  # transaction price
            item_result['gpSjS'] = self.transfer_date(relate_dict.get('gpSjS', None))  # listing start time
            item_result['gpSjE'] = self.transfer_date(relate_dict.get('gpSjE', None))  # listing end time
            item_result['gyggGuid'] = relate_dict.get('gyggGuid', None)  # transfer-notice id
            item_result['mj'] = relate_dict.get('mj', None)  # land area
            item_result['other'] = json.dumps(resp_dict, ensure_ascii=False)  # raw payload
            # One timestamp for both fields (the original called now() twice).
            now_str = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            item_result['fetchTime'] = now_str  # fetch time
            item_result['updateTime'] = now_str  # update time
            yield item_result

    def parse_giving_detail(self, response):
        """Parse a transfer-notice detail response.

        The notice body ('gyggNr') is HTML; one item is yielded per
        embedded parcel table.
        """
        item_giving = copy(response.meta['item_giving'])
        try:
            resp_dict = json.loads(response.body.decode())
            data_dict = resp_dict.get('data', None)
            gyggnr_selector = Selector(text=data_dict['gyggNr'])
            table_list = gyggnr_selector.xpath("//table//div/table")
            assert len(table_list), \
                f'{item_giving["category"]}-【{item_giving["gyggBt"]}/{item_giving["gyggGuid"]}】-详情页数据为空'
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error(
                f'{item_giving["category"]}-【{item_giving["gyggBt"]}/{item_giving["gyggGuid"]}】-详情页提取出错，error:{e}')
        else:
            # Shared fields, set once before the per-parcel loop.
            item_giving['cityName'] = '重庆市'  # city
            item_giving['gyggBt'] = gyggnr_selector.xpath("//table/tr[1]/td/text()").extract_first()  # notice title
            gygg_fbt = gyggnr_selector.xpath("//table/tr[2]/td/text()").extract_first()
            item_giving['gyggBh'], item_giving['fbSj'] = self.get_gyggbh_and_fbSj(gygg_fbt)  # notice number, publish time
            now_str = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            for table_obj in table_list:
                # Yield a fresh copy per parcel: the original mutated and
                # re-yielded ONE shared item, so every yielded reference
                # ended up with the last parcel's values.
                item = deepcopy(item_giving)
                item['zdBh'] = table_obj.xpath(
                    ".//td[contains(text(),'宗地编号')]/following-sibling::td[1]/text()").extract_first()  # parcel number
                item['zdZl'] = table_obj.xpath(
                    ".//td[contains(text(),'宗地坐落')]/following-sibling::td[1]/text()").extract_first()  # parcel location
                item['crBzj'] = table_obj.xpath(
                    ".//td[contains(text(),'保证金')]/following-sibling::td[1]/text()").extract_first()  # deposit
                item['qsj'] = table_obj.xpath(
                    ".//td[contains(text(),'起始价')]/following-sibling::td[1]/text()").extract_first()  # starting price
                item['gpSjS'] = table_obj.xpath(
                    ".//td[contains(text(),'挂牌开始时间')]/following-sibling::td[1]/text()").extract_first()  # listing start time
                item['gpSjE'] = table_obj.xpath(
                    ".//td[contains(text(),'挂牌截止时间')]/following-sibling::td[1]/text()").extract_first()  # listing end time
                item['other'] = json.dumps(resp_dict, ensure_ascii=False)  # raw payload
                item['fetchTime'] = now_str  # fetch time
                item['updateTime'] = now_str  # update time
                yield item

    @staticmethod
    def transfer_date(stamp):
        """Convert a millisecond epoch timestamp to 'YYYY-MM-DD'.

        :param stamp: millisecond timestamp (int or numeric string).
        :return: the formatted date string, or the input unchanged when it
            cannot be interpreted as a timestamp (e.g. None, or already a
            formatted date).
        """
        try:
            date_obj = datetime.datetime.fromtimestamp(int(stamp) / 1000, None)
        except (TypeError, ValueError, OSError, OverflowError):
            # Narrowed from a bare except: these are the errors int() and
            # fromtimestamp() can raise for bad input.
            return stamp
        return date_obj.strftime("%Y-%m-%d")

    @staticmethod
    def get_gyggbh_and_fbSj(gygg_fbt: str):
        """Split a notice sub-title into (notice number, publish time).

        :param gygg_fbt: e.g. '云阳告字[2021]32号    2021/12/30'
        :return: (gyggBh, fbSj); on malformed input both elements are the
            original value so the raw text is never lost.
        """
        try:
            parts = gygg_fbt.split()
        except AttributeError:
            # Not a string (e.g. None from a missing table cell).
            return gygg_fbt, gygg_fbt
        if len(parts) != 2:
            # Explicit check instead of assert (asserts vanish under -O).
            return gygg_fbt, gygg_fbt
        return parts[0], parts[1]
