import scrapy
import requests
from bid.items import BidItem
from bid.tools import *


class FujianSpider(scrapy.Spider):
    """Crawl bidding announcements from the Fujian public-resource trading portal.

    The portal requires a ``portal-sign`` header computed by ``fujian.js`` for
    every POST body, and AES-CBC-encrypts the JSON it returns inside a
    ``"Data"`` field; :func:`aes_decrypt` (from ``bid.tools``) reverses that.
    """

    name = 'fujian'
    allowed_domains = ['ggzyfw.fujian.gov.cn']
    start_urls = ['https://ggzyfw.fujian.gov.cn/']

    # Each entry: [POST-body template (filled with end date, page number and a
    # millisecond timestamp), human-readable category label, type code].
    t_lis = [
        [
            '{"KIND":"GCJS","GGTYPE":"1","timeType":"6","BeginTime":"2020-03-29 00:00:00","EndTime":"%s 23:59:59","createTime":[],"pageNo":%s,"pageSize":10,"total":99999,"ts":%s}',
            '工程建设-招标公告', '1'],
        [
            '{"KIND":"GCJS","GGTYPE":"2,3,7","timeType":"6","BeginTime":"2020-03-28 00:00:00","EndTime":"%s 23:59:59","createTime":[],"pageNo":%s,"pageSize":10,"total":995306,"ts":%s}',
            '工程建设-变更公告', '2'],
        [
            '{"KIND":"GCJS","GGTYPE":"4","timeType":"6","BeginTime":"2020-03-28 00:00:00","EndTime":"%s 23:59:59","createTime":[],"pageNo":%s,"pageSize":10,"total":995306,"ts":%s}',
            '工程建设-中标候选人公示', '2'],
        [
            '{"KIND":"GCJS","GGTYPE":"5","timeType":"6","BeginTime":"2020-03-28 00:00:00","EndTime":"%s 23:59:59","createTime":[],"pageNo":%s,"pageSize":10,"total":995306,"ts":%s}',
            '工程建设-中标结果公告', '3'],
        [
            '{"KIND":"GCJS","GGTYPE":"6","timeType":"6","BeginTime":"2020-03-28 00:00:00","EndTime":"%s 23:59:59","createTime":[],"pageNo":%s,"pageSize":10,"total":995306,"ts":%s}',
            '工程建设-资质预审公告', '2'],
    ]

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'}
    msg = []  # categories whose crawl hit an already-seen or cutoff-dated item
    url = 'https://ggzyfw.fujian.gov.cn/Trade/TradeInfo'

    # AES-CBC key/iv the portal uses to encrypt its "Data" payloads.
    _AES_KEY = 'BE45D593014E4A4EB4449737660876CE'
    _AES_IV = 'A8909931867B0425'
    _js_ctx = None  # lazily compiled fujian.js signing context (shared)

    @classmethod
    def _signed_headers(cls, payload):
        """Return a FRESH headers dict carrying the portal-sign for *payload*.

        Fixes two defects of the original inline code: it re-opened and
        re-compiled ``fujian.js`` for every request (leaking the file handle),
        and it mutated the shared ``self.headers`` dict that had already been
        handed to not-yet-sent asynchronous scrapy.Request objects, so a later
        update could clobber an in-flight request's signature.
        """
        if cls._js_ctx is None:
            with open('fujian.js') as f:
                cls._js_ctx = execjs.compile(f.read())
        hdrs = dict(cls.headers)
        hdrs['portal-sign'] = cls._js_ctx.call('get_sign', json.loads(payload))
        hdrs['Content-Type'] = 'application/json;charset=UTF-8'
        return hdrs

    @classmethod
    def _decrypt(cls, ciphertext):
        """Decrypt one AES-CBC ``Data`` payload returned by the portal."""
        return aes_decrypt(ciphertext, cls._AES_KEY, cls._AES_IV, AES.MODE_CBC)

    def start_requests(self):
        """Walk every category page by page, yielding detail-page requests.

        A category stops at the first item that is already in the Redis
        dupefilter or is dated 2021 (the incremental-crawl cutoff).
        """
        session = requests.session()
        for lis in self.t_lis:
            template, label, typ = lis
            for page in range(1, 9999):
                data = template % (time.strftime('%Y-%m-%d'), page, int(time.time() * 1000))
                # NOTE(review): this synchronous requests call blocks the
                # Twisted reactor; kept as-is to preserve sequential paging.
                res = session.post(self.url, data=data,
                                   headers=self._signed_headers(data)).text
                res = re.findall('"Data": "(.*?)"', res, re.S)[0]
                res = json.loads(self._decrypt(res))
                ls = res['Table']
                if page > int(res['PageTotal']):
                    break
                for row in ls:
                    item = {
                        'link': f'https://ggzyfw.fujian.gov.cn/web/index.html#/business/detail?cid={row["M_ID"]}&type={row["KIND"]}',
                        'title': row['NAME'],
                        'time': row['TM'][:10],
                        'classification': '福建-' + label,
                        'typ': typ,
                    }
                    # Duplicate or past the date cutoff: mark category done.
                    if redis_dupefilter(item) or item['time'].startswith('2021'):
                        self.msg.append(lis)
                        break
                    data_ = '{"cid": "%s", "ts": "%s", "type": "%s"}' % (
                        row["M_ID"], int(time.time() * 1000), row["KIND"])
                    yield scrapy.Request(
                        url='https://ggzyfw.fujian.gov.cn/Trade/TradeInfoDetail',
                        body=data_,
                        headers=self._signed_headers(data_),
                        callback=self.get_url,
                        meta={'item': item, 't': row['GGTYPE']})
                if lis in self.msg:
                    print(label, '完成')
                    break
                # Crude politeness delay proportional to the page size.
                time.sleep(len(ls))

    def get_url(self, response):
        """Extract the detail M_ID from the list response and request its content."""
        payload = re.findall(r'Data":\s*"(.*?)"', response.text, re.S)[0]
        html2 = self._decrypt(payload)
        t = response.meta["t"]
        # Pick the M_ID of the attachment entry matching this announcement type.
        M_id = re.findall(rf'"Type": {t}.*?M_ID": "(\d+)"', html2, re.S)[0]
        data_ = '{"type":%s,"m_id":"%s","ts":%s}' % (t, M_id, int(time.time() * 1000))
        yield scrapy.Request(
            url='https://ggzyfw.fujian.gov.cn/Trade/TradeInfoContent',
            body=data_,
            headers=self._signed_headers(data_),
            callback=self.parse,
            meta={'item': response.meta["item"]})

    def parse(self, response):
        """Decrypt the announcement body, clean it up, and emit the item."""
        item = BidItem()
        item.update(response.meta['item'])
        res = self._decrypt(response.json()['Data'])
        # Strip literal \r/\n escapes and backslashes left by double JSON encoding.
        res = re.sub(r'\\r|\\n', '', res).replace('\\', '')
        res = re.findall(r'Contents":\s*"(.*?)"\s*}', res, re.S)[0]
        item['content'] = get_content(res, '')
        item = get_field(dict(item))
        yield item
