import datetime
import hashlib
import json
import re

import pymysql
import redis
import requests as requests
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from landchina_crawlSpider.items import LandchinaCrawlspiderItem
from landchina_crawlSpider.user_agent_list import random_ua


class LandSpider(CrawlSpider):
    name = 'land'
    # allowed_domains = ['www.xxx.com']
    # start_urls = ['https://www.landchina.com/default.aspx?tabid=263&ComName=default']

    link = LinkExtractor(allow=r'&recorderguid=.*?')
    rules = (
        Rule(link, callback='parse_item', follow=False),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.page_end = None
        self.red = redis.Redis(host='localhost', port=6379, db=0)
        self.mysql_conn = pymysql.connect(host='localhost', port=3306, database='landchina', user='root',
                                          password='ck123456')
        self.cur = self.mysql_conn.cursor()
        self.city_code = self.get_city_code()
        self.page_max = 200
        self.fd = open('landchina.csv', mode='a+', encoding='utf-8')

    def start_requests(self):
        print('start_requests')
        start_url = 'https://www.landchina.com/default.aspx?tabid=263&ComName=default'
        headers = {'User-Agent': random_ua()}
        # 可以另外添加一层循环控制时间参数
        start_time = '2018-1-1'
        end_time = datetime.datetime.now().date().strftime('%Y-%-m-%-d')
        # 控制行政区参数
        for city in self.city_code:
            # 起始页,每次循环重置一次
            page_index = 1
            # 终止页,每次循环重置一次
            page_end = 200
            land_id = city['land_id']
            land_name = city['land_name']
            while True:
                print(land_id, land_name, page_index)

                # page_end为空则结束当前循环
                if self.page_max is None:
                    break

                # page_index>page_end则已经获取所有页面数据，结束当前循环
                if page_index > self.page_max:
                    break

                TAB_QuerySubmitConditionData = '9f2c3acd-0256-4da2-a659-6949c4671a2a:' + start_time + '~' + end_time + \
                                               '|' + '42ad98ae-c46a-40aa-aacc-c0884036eeaf:' + land_id + u'▓' + '~' + \
                                               land_name
                # TAB_QuerySubmitConditionData含有特殊符号'▓'必须先编码才能作为form_data参数使用
                TAB_QuerySubmitConditionData = TAB_QuerySubmitConditionData.encode(
                    'gb18030')
                form_data = {
                    '__VIEWSTATE': '/wEPDwUJNjkzNzgyNTU4D2QWAmYPZBYIZg9kFgICAQ9kFgJmDxYCHgNzcmMFN1VzZXIvZGVmYXVsdC9VcGxvYWQvc3lzRnJhbWVJbWcveF90ZHNjdzIwMjBfZmxhc2hfMS5wbmdkAgEPZBYCAgEPFgIeBXN0eWxlBSBCQUNLR1JPVU5ELUNPTE9SOiNmM2Y1Zjc7Q09MT1I6O2QCAg9kFgICAQ9kFgJmD2QWAmYPZBYCZg9kFgRmD2QWAmYPZBYCZg9kFgJmD2QWAmYPZBYCZg8WBB8BBSBDT0xPUjojRDNEM0QzO0JBQ0tHUk9VTkQtQ09MT1I6Ox4HVmlzaWJsZWgWAmYPZBYCAgEPZBYCZg8PFgIeBFRleHRlZGQCAQ9kFgJmD2QWAmYPZBYCZg9kFgRmD2QWAmYPFgQfAQWHAUNPTE9SOiNEM0QzRDM7QkFDS0dST1VORC1DT0xPUjo7QkFDS0dST1VORC1JTUFHRTp1cmwoaHR0cDovL3d3dy5sYW5kY2hpbmEuY29tL1VzZXIvZGVmYXVsdC9VcGxvYWQvc3lzRnJhbWVJbWcveF90ZHNjd19zeV9qaGdnXzAwMC5naWYpOx4GaGVpZ2h0BQEzFgJmD2QWAgIBD2QWAmYPDxYCHwNlZGQCAg9kFgJmD2QWAmYPZBYCZg9kFgJmD2QWAmYPZBYCZg9kFgRmD2QWAmYPFgQfAQUgQ09MT1I6I0QzRDNEMztCQUNLR1JPVU5ELUNPTE9SOjsfAmgWAmYPZBYCAgEPZBYCZg8PFgIfA2VkZAICD2QWAmYPZBYEZg9kFgJmD2QWAmYPZBYCZg9kFgJmD2QWAmYPZBYCZg8WBB8BBSBDT0xPUjojRDNEM0QzO0JBQ0tHUk9VTkQtQ09MT1I6Ox8CaBYCZg9kFgICAQ9kFgJmDw8WAh8DZWRkAgIPZBYEZg9kFgJmD2QWAmYPZBYCZg9kFgICAQ9kFgJmDxYEHwEFhgFDT0xPUjojRDNEM0QzO0JBQ0tHUk9VTkQtQ09MT1I6O0JBQ0tHUk9VTkQtSU1BR0U6dXJsKGh0dHA6Ly93d3cubGFuZGNoaW5hLmNvbS9Vc2VyL2RlZmF1bHQvVXBsb2FkL3N5c0ZyYW1lSW1nL3hfdGRzY3dfenlfamdnZ18wMS5naWYpOx8EBQI0NhYCZg9kFgICAQ9kFgJmDw8WAh8DZWRkAgEPZBYCZg9kFgJmD2QWAmYPZBYCAgEPZBYCZg8WBB8BBSBDT0xPUjojRDNEM0QzO0JBQ0tHUk9VTkQtQ09MT1I6Ox8CaBYCZg9kFgICAQ9kFgJmDw8WAh8DZWRkAgMPZBYCAgMPFgQeCWlubmVyaHRtbAWuEDxwPjxzdHlsZSB0eXBlPSJ0ZXh0L2NzcyI+QTpsaW5rIHsgQ09MT1I6IzAwMDAwMDsgVEVYVC1ERUNPUkFUSU9OOk5vbmV9QTp2aXNpdGVkIHsgIENPTE9SOiMwMDAwMDA7IFRFWFQtREVDT1JBVElPTjpOb25lfUE6YWN0aXZlIHsgICAgICBDT0xPUjojMDAwMDAwOyBURVhULURFQ09SQVRJT046Tm9uZX1BOmhvdmVyIHsgICAgQ09MT1I6IzAwOTlGRjsgVEVYVC1ERUNPUkFUSU9OOk5vbmV9PC9zdHlsZT48L3A+PHA+PGJyIC8+Jm5ic3A7PC9wPjx0YWJsZT48dGJvZHk+PHRyIGNsYXNzPSJmaXJzdFJvdyI+PHRkIHZhbGlnbj0idG9wIiB3aWR0aD0iMzcwIiBzdHlsZT0iYm9yZGVyLWJvdHRvbTogMHB4IHNvbGlkOyBib3JkZXItbGVmdDogMHB4IHNvbGlkOyBib3JkZXItdG9wOiAwcHggc29saWQ7IGJvcmRlci1yaWdodDogMHB4IHNvbGlkIj48cCBzdHlsZT0idGV4dC1hbGlnbjogY2VudGVyIj48YSB0YXJnZXQ9Il9zZWxmIiBocmVmPSJodHRwczovL3d3dy5sYW5kY2hpbmEuY29tLyI+PGltZyB0aXRsZT0idGRzY3dfbG9nZTEucG5nIiBhbHQ9InRkc2N3X2xvZ2UxLnBuZyIgc3JjPSJodHRwczovL3d3dy5sYW5kY2hpbmEuY29tL25ld21hbmFnZS91ZWRpdG9yL3V0ZjgtbmV0L25ldC91cGxvYWQvaW1hZ2UvMjAyMDA2MTAvNjM3Mjc0MDYzNDI4NzcxMTA4MTExMTMxMi5wbmciIC8+PC9hPjwvcD48L3RkPjx0ZCB2YWxpZ249InRvcCIgd2lkdGg9IjYyMCIgc3R5bGU9ImJvcmRlci1ib3R0b206IDBweCBzb2xpZDsgYm9yZGVyLWxlZnQ6IDBweCBzb2xpZDsgd29yZC1icmVhazogYnJlYWstYWxsOyBib3JkZXItdG9wOiAwcHggc29saWQ7IGJvcmRlci1yaWdodDogMHB4IHNvbGlkIj48c3BhbiBzdHlsZT0iZm9udC1mYW1pbHk6IOWui+S9kywgU2ltU3VuOyBjb2xvcjogcmdiKDI1NSwyNTUsMjU1KTsgZm9udC1zaXplOiAxMnB4Ij7kuLvlip7vvJroh6rnhLbotYTmupDpg6jkuI3liqjkuqfnmbvorrDkuK3lv4PvvIjoh6rnhLbotYTmupDpg6jms5XlvovkuovliqHkuK3lv4PvvIk8L3NwYW4+PHA+PHNwYW4gc3R5bGU9ImZvbnQtZmFtaWx5OiDlrovkvZMsIFNpbVN1bjsgY29sb3I6IHJnYigyNTUsMjU1LDI1NSk7IGZvbnQtc2l6ZTogMTJweCI+5oyH5a+85Y2V5L2N77ya6Ieq54S26LWE5rqQ6YOo6Ieq54S26LWE5rqQ5byA5Y+R5Yip55So5Y+4Jm5ic3A7ICZuYnNwO+aKgOacr+aUr+aMge+8mua1meaxn+iHu+WWhOenkeaKgOiCoeS7veaciemZkOWFrOWPuDwvc3Bhbj48L3A+PHA+PHNwYW4gc3R5bGU9ImNvbG9yOiAjZmZmZmZmIj48c3BhbiBzdHlsZT0iZm9udC1mYW1pbHk6IOWui+S9kywgU2ltU3VuOyBmb250LXNpemU6IDEycHgiPjxhIGhyZWY9Imh0dHBzOi8vYmVpYW4ubWlpdC5nb3YuY24vIj48c3BhbiBzdHlsZT0iY29sb3I6ICNmZmZmZmYiPuS6rElDUOWkhzEyMDM5NDE05Y+3LTQ8L3NwYW4+PC9hPjwvc3Bhbj48L3NwYW4+PHNwYW4gc3R5bGU9ImZvbnQtZmFtaWx5OiDlrovkvZMsIFNpbVN1bjsgY29sb3I6IHJnYigyNTUsMjU1LDI1NSk7IGZvbnQtc2l6ZTogMTJweCI+Jm5ic3A7Jm5ic3A7Jm5ic3A7PGEgaHJlZj0iaHR0cHM6Ly93d3cuYmVpYW4uZ292LmNuL3BvcnRhbC9yZWdpc3RlclN5c3RlbUluZm8/cmVjb3JkY29kZT0xMTAxMDIwMjAwODk5MCI+PHNwYW4gc3R5bGU9ImNvbG9yOiAjZmZmZmZmIj7kuqzlhaznvZHlronlpIcxMTAxMDIwMjAwODk5MDwvc3Bhbj48L2E+Jm5ic3A7Jm5ic3A7Jm5ic3A76YKu566x77yabGFuZGNoaW5hMjE4QDE2My5jb20mbmJzcDsmbmJzcDs8c2NyaXB0IHR5cGU9InRleHQvamF2YXNjcmlwdCI+dmFyIF9iZGhtUHJvdG9jb2wgPSAoKCJodHRwczoiID09IGRvY3VtZW50LmxvY2F0aW9uLnByb3RvY29sKSA/ICIgaHR0cHM6Ly8iIDogIiBodHRwczovLyIpO2RvY3VtZW50LndyaXRlKHVuZXNjYXBlKCIlM0NzY3JpcHQgc3JjPSciICsgX2JkaG1Qcm90b2NvbCArICJobS5iYWlkdS5jb20vaC5qcyUzRjgzODUzODU5YzcyNDdjNWIwM2I1Mjc4OTQ2MjJkM2ZhJyB0eXBlPSd0ZXh0L2phdmFzY3JpcHQnJTNFJTNDL3NjcmlwdCUzRSIpKTs8L3NjcmlwdD48L3NwYW4+PC9wPjwvdGQ+PC90cj48L3Rib2R5PjwvdGFibGU+PHA+Jm5ic3A7PC9wPh8BBWRCQUNLR1JPVU5ELUlNQUdFOnVybChodHRwOi8vd3d3LmxhbmRjaGluYS5jb20vVXNlci9kZWZhdWx0L1VwbG9hZC9zeXNGcmFtZUltZy94X3Rkc2N3MjAxM195d18xLmpwZyk7ZGSROBpN7Ou6S2YtyT/YJE2rnjHfndNLarLWFJhIlQuyjA==',
                    '_VIEWSTATEGENERATOR': "CA0B0334",
                    '__EVENTVALIDATION': '/wEdAAISCq2FkCh/InrAaZFxC1vNCeA4P5qp+tM6YGffBqgTjY2TFC6PLXgOad3UkDIJ23GnLFsuDKRNysjMxLxyvjLD',
                    'hidComName': 'default',
                    'TAB_QueryConditionItem': ['9f2c3acd-0256-4da2-a659-6949c4671a2a',
                                               '42ad98ae-c46a-40aa-aacc-c0884036eeaf'],
                    'TAB_QuerySortItemList': '282:False',
                    'TAB_QuerySubmitConditionData': TAB_QuerySubmitConditionData,
                    'TAB_QuerySubmitOrderData': '282:False',
                    'TAB_RowButtonActionControl': '',
                    # TAB_QuerySubmitPagerData：控制页码
                    'TAB_QuerySubmitPagerData': str(page_index),
                    'TAB_QuerySubmitSortData': '',
                }
                yield scrapy.FormRequest(url=start_url, formdata=form_data, headers=headers, dont_filter=True,
                                         callback=self.parse_page, meta={'page_end': page_end})
                # page_index自增翻页
                page_index += 1
            print(land_name + '>>>已爬取完成')

    # 解析post请求response数据
    def parse_page(self, response):
        # print('parse_page')
        if self.page_max != self.page_max:
            page_source = response.xpath(
                '//tr/td[@class="pager"][1]/text()').extract()

            if page_source and page_source != 'NULL':
                self.page_max = re.findall(
                    '共(.*?)页.*?共.*?条记录', page_source[0], re.S)[0]
                self.page_max = int(self.page_max)
            else:
                self.page_max = None
            self.page_end = self.page_max
            # 必须return默认的_parse()函数，否则后续的rule规则无法生效
        return self._parse(response)

    # 解析详情页
    def parse_item(self, response):
        print('parse_item')
        item = LandchinaCrawlspiderItem()

        detail_url = response.url
        # print(detail_url)
        detail_url_id = hashlib.sha256(detail_url.encode()).hexdigest()

        try:
            ret = self.red.sadd('detail_url_id', detail_url_id)
            if ret:

                # 行政区
                division = response.xpath(
                    '//*[@id="mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r1_c2_ctrl"]/text()').extract()
                if division:
                    item['division'] = division[0]
                else:
                    item['division'] = 'None'

                # 项目位置
                location = response.xpath(
                    "//span[@id='mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r16_c2_ctrl']/text()").extract()
                if location:
                    item['location'] = location[0]
                else:
                    item['location'] = 'None'

                # 面积(公顷)
                area = response.xpath(
                    '//span[@id="mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r2_c2_ctrl"]/text()').extract()
                if area:
                    item['area'] = area[0]
                else:
                    item['area'] = 'None'

                # 土地用途
                purpose = response.xpath(
                    '//span[@id="mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r3_c2_ctrl"]/text()').extract()
                if purpose:
                    item['purpose'] = purpose[0]
                else:
                    item['purpose'] = 'None'

                # 成交价格(万元)
                price = response.xpath(
                    '//span[@id="mainModuleContainer_1855_1856_ctl00_ctl00_p1_f3_r2_c3_0_ctrl"]/text()').extract()
                if price:
                    item['price'] = price[0]
                else:
                    item['price'] = 'None'

                # 合同签订日期
                sign_date = response.xpath(
                    '//span[@id="mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r14_c4_ctrl"]/text()').extract()
                if sign_date:
                    item['sign_date'] = sign_date[0]
                else:
                    item['sign_date'] = 'None'

                # 电子监管号
                supervision_number = response.xpath(
                    '//span[@id="mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1_r1_c4_ctrl"]/text()').extract()
                if supervision_number:
                    item['supervision_number'] = supervision_number[0]
                else:
                    item['supervision_number'] = 'None'
                yield item
            else:
                pass
        except Exception as e:
            print(e)

    # 获取land_id,land_name作为post参数
    def get_city_code(self):
        city_list = []
        '''
        获取land_id，land_name
        :return:
        '''
        try:
            res = self.red.get('city_code')
            if not res:
                url = 'https://www.landchina.com/ExtendModule/WorkAction/EnumHandler.ashx'
                headers = {
                    "User-Agent": random_ua(),
                }
                for i in range(11, 67):
                    form_data = {
                        'id': str(i),
                        'group': '1'
                    }
                    response = requests.session().post(url=url, headers=headers, data=form_data)
                    response_json = response.json()
                    if response_json:
                        for city in response_json:
                            city_list.append(
                                {'land_id': city['value'], 'land_name': city['name']})
                self.red.set('city_code', json.dumps(city_list))
            return json.loads(res)
        except Exception as e:
            print(e)
