# -*- coding: utf-8 -*-
import scrapy
import json
import os
import time
import re
import random
from pprint import pprint



from PubRes.items import PubresItem


import logging
logger=logging.getLogger(__name__)

# import sys, io
# sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")


from str2dict import get_form_data
from refer import get_refer



# Candidate HTTP proxy addresses (host:port strings). Currently unused at
# runtime: the 'proxy' meta entries that would consume it are commented out
# in the spider below.
POOL=[
    # '117.87.178.106:9000',
    # '47.93.36.195:8118',
    '175.44.148.151:9000'
]




class PrSpider(scrapy.Spider):
    """Crawl public-resource deal listings from deal.ggzy.gov.cn.

    Flow: for each province in PubRes/queries/query.json, POST the search
    form for page 1 (``start_requests``), read the total page count and
    request every page (``parse``), build one item per listing and follow
    its detail URL (``page_parse``), then fill in the full text content
    (``parse3``).
    """
    name = 'pr'
    # allowed_domains = ['deal.ggzy.gov.cn']

    # Throttle at the engine level. The original called time.sleep(1) inside
    # callbacks, which blocks Scrapy's (Twisted) event loop and stalls every
    # in-flight request; DOWNLOAD_DELAY gives the same politeness without
    # blocking.
    custom_settings = {'DOWNLOAD_DELAY': 1}

    def start_requests(self):
        """Yield one first-page form POST per province listed in query.json."""
        url = 'http://deal.ggzy.gov.cn/ds/deal/dealList_find.jsp'
        # os.path.join keeps the path portable; the original hard-coded
        # Windows backslashes and broke on POSIX.
        query_path = os.path.join(os.getcwd(), 'PubRes', 'queries', 'query.json')
        with open(query_path, encoding='utf-8') as f:
            data = json.load(f)
        for i_province in data['query']:
            logger.warning('省份是:')
            logger.warning(i_province)

            template = get_form_data()
            refer = get_refer()
            template['DEAL_PROVINCE'] = refer[i_province]  # map province name to its site code
            template['PAGENUMBER'] = str(1)                # always start on page 1

            yield scrapy.FormRequest(url,
                                     meta={'template': template,
                                           'province': i_province,
                                           'stamp': str(int(time.time()))
                                           # 'proxy': 'http://' + random.choice(POOL)
                                           },
                                     formdata=template,   # submit the search form
                                     callback=self.parse)

    def parse(self, response):
        """From the first-page response, read the total page count and
        request every page of this province's result set."""
        ret = json.loads(response.text)  # Response.text already decodes

        ttl_page = int(ret['ttlpage'])   # total number of result pages

        for i_page in range(ttl_page):
            # Copy the form dict per request: the original mutated one shared
            # dict, so every queued request's meta['template'] ended up
            # pointing at whatever page number was written last.
            template = dict(response.meta['template'])
            template['PAGENUMBER'] = str(i_page + 1)  # this request's page
            template['correct'] = 'a'                 # marker so the body differs from page 1's

            meta = dict(response.meta)
            meta['template'] = template

            # logger.warning(template['PAGENUMBER'])  # log current page number

            yield scrapy.FormRequest(response.url,
                                     meta=meta,
                                     formdata=template,   # submit the search form
                                     callback=self.page_parse)

    def page_parse(self, response):
        """Parse one result page: emit a partially-filled item per listing
        and follow each listing's detail URL for the full content."""
        ret = json.loads(response.text)

        # logger.warning(response.meta['proxy'])
        logger.warning(response.meta['province'])  # which province we are on
        logger.warning(ret['currentpage'])         # which page we are on

        for entry in ret['data']:
            item = PubresItem()
            item['province'] = response.meta['province']
            item['classify'] = entry['classifyShow']
            item['title'] = entry['title']
            item['content'] = 'nothing'  # filled by parse3 (needs another request)
            item['time'] = entry['timeShow']
            item['url'] = entry['url']
            item['stamp'] = response.meta['stamp']

            # The detail page lives at the listing URL with /a/ swapped for
            # /b/ (literal substring, no regex needed).
            new_url = item['url'].replace('/a/', '/b/')
            yield scrapy.Request(url=new_url,
                                 meta={'item': item},  # pass the partial item along
                                 callback=self.parse3)

    def parse3(self, response):
        """Fill in the item's text content from the detail page and emit it."""
        item = response.meta['item']
        content = response.xpath('string(.)').extract_first()
        # extract_first() returns None on an empty document; guard so we
        # still emit the item instead of crashing in re.sub.
        if content:
            content = re.sub(r'\s+', ' ', content)  # collapse all whitespace runs
        else:
            content = ''
        item['content'] = content
        yield item