from commonresources.spider_items.base_item import convert_dict
from commonresources.spiders.basespider import BaseSpider
from scrapy.http import Request, FormRequest
from scrapy.selector import Selector
import re
import requests
import time


class JiangSuJianSheGongChengZhaoBiaoWangSpider(BaseSpider):
    """Spider for the Jiangsu Construction Engineering Bidding Network
    (江苏建设工程招标网, http://www.jszb.com.cn/JSZB/).
    """
    name = 'JiangSuJianSheGongChengZhaoBiaoWang'
    name_zh = '江苏建设工程招标网'  # human-readable site name
    province = "江苏"
    city = ''
    start_urls = ['http://www.jszb.com.cn/JSZB/']

    def __init__(self, full_dose=True, not_full_type=False, zb_info=False):
        """Initialize the spider.

        :param full_dose: forwarded to ``BaseSpider.__init__`` (semantics
            defined by the base class — presumably full vs. incremental crawl).
        :param not_full_type: stored for downstream use.
        :param zb_info: when truthy, ``parse`` only follows list pages whose
            URL contains "ZhongBiaoGS".
        """
        self.convert_dict = convert_dict
        self.not_full_type = not_full_type
        self.zb_info = zb_info
        super().__init__(full_dose)

    def parse(self, response):
        """Collect the unique "more" list-page links from the portal home
        page, prime an ASP.NET session cookie for each via a blocking
        ``requests`` call, then schedule the list pages for ``parse_list``.
        """
        sel = Selector(response)
        more_nodes = sel.xpath('//td[@class="more"]//a')
        # De-duplicate; only "YW_info" links lead to announcement lists.
        unique_urls = set()
        for node in more_nodes:
            href = node.xpath("./@href").extract()[0]
            if "YW_info" in href:
                unique_urls.add(href)
        for href in unique_urls:
            # When zb_info is set, restrict the crawl to ZhongBiaoGS pages.
            if self.zb_info and "ZhongBiaoGS" not in href:
                continue
            full_href = 'http://www.jszb.com.cn/JSZB/' + href
            fake_heads = self.get_fake_head(full_href)
            # The site needs an ASP.NET_SessionId cookie before list pages
            # behave; retry up to 5 times to obtain one.
            browser_cookie = {}
            for _ in range(5):
                try:
                    req = requests.get(full_href, headers=fake_heads, timeout=10)
                except requests.RequestException:
                    # Transient network failure: wait and retry instead of
                    # letting the exception abort the whole callback.
                    time.sleep(1)
                    continue
                browser_cookie = requests.utils.dict_from_cookiejar(req.cookies)
                if 'ASP.NET_SessionId' in browser_cookie:
                    break
                time.sleep(1)
            yield Request(full_href, callback=self.parse_list,
                          cookies=browser_cookie,
                          meta={"cookie": browser_cookie})

    def parse_list(self, response):
        """Parse one announcement list page: yield a detail-page Request per
        row, then follow the ASP.NET ``__doPostBack`` pagination until either
        no rows remain or ``check_if_need_break`` reports rows too old.
        """
        sel = Selector(response)
        cookies = response.meta['cookie']

        # The category header ("white1" cell) is not guaranteed to be present
        # (pagination POST responses never carry announcement_type in meta
        # either); default to "" so the detail-request meta below never hits
        # an unbound name.
        announcement_type = ""
        try:
            if 'announcement_type' not in response.meta:
                announcement_type = sel.xpath("//td[@class='white1']/text()").extract()[0]
            else:
                announcement_type = response.meta['announcement_type']
            announcement_type = announcement_type.replace("\r", "").replace("\n", "").replace(" ", "")
        except IndexError:
            # Header cell missing on this page — keep the "" fallback.
            pass

        # page_key identifies which ASP.NET list control this page uses.
        if 'page_key' not in response.meta:
            page_key = self.get_page_key(response.text)
        else:
            page_key = response.meta['page_key']

        need_break = False
        info_nodes = sel.xpath('//td[@id="%s_tdcontent"]//tr[@class="moreinfoline"]' % page_key)
        if not info_nodes:
            # The site occasionally returns an empty page mid-pagination:
            # replay the previous POST with the page index bumped by one.
            if 'old_message' in response.meta:
                old_message = response.meta['old_message']
                old_message['formdata']['__EVENTARGUMENT'] = str(int(old_message['formdata']['__EVENTARGUMENT']) + 1)
                yield FormRequest(
                    url=old_message['url'],
                    method='post',
                    formdata=old_message['formdata'],
                    headers=old_message['headers'],
                    dont_filter=old_message['dont_filter'],
                    callback=self.parse_list,
                    cookies=old_message['cookies'],
                    meta={"cookie": old_message['cookies'],
                          'page_key': old_message['page_key'],
                          'old_message': old_message
                          }
                )
        else:
            for info_ in info_nodes:  # one row per announcement
                td_nodes = info_.xpath("./td")
                onclick_str = td_nodes[1].xpath("./a/@onclick").extract()[0]
                announcement_title = td_nodes[1].xpath("./a/@title").extract()[0]
                project_type = td_nodes[2].xpath('./text()').extract()[0]
                release_time = td_nodes[3].xpath('./text()').extract()[0]

                project_type = project_type.replace("\r", "").replace("\n", "").replace(" ", "")
                release_time = release_time.replace("\r", "").replace("\n", "").replace(" ", "")

                # Stop paging once rows fall outside the crawl window.
                need_break = self.check_if_need_break(release_time)
                if need_break:
                    break

                # onclick looks like: someFn("../Page.aspx?...", ...).
                # The lazy capture runs up to the first comma and therefore
                # may include the JS string's closing quote — strip it.
                match = re.search(r'\("([^“]*?),', onclick_str)
                if match:
                    click_url = match[1].rstrip('"')
                    info_url = 'http://www.jszb.com.cn/jszb/YW_info' + click_url.replace("..", "").replace("%20", "")
                    info_url = info_url.strip()

                    yield Request(info_url, callback=self.parse_info,
                                  meta={"project_type": project_type,
                                        'release_time': release_time,
                                        'announcement_title': announcement_title,
                                        'announcement_type': announcement_type,
                                        'page_key': page_key},
                                  cookies=cookies)

            if not need_break:
                # Find the "next page" arrow (nextn.gif) and replay the
                # ASP.NET __doPostBack as a POST for the next page.
                next_links = sel.xpath("//div[@id='%s_Pager']//a" % page_key)
                for a_link in next_links:
                    img_node = a_link.xpath("./img")
                    if not img_node:
                        continue
                    img_src = img_node.xpath("./@src").extract()[0]
                    if 'nextn.gif' not in img_src:
                        continue
                    href = a_link.xpath('./@href').extract()[0]
                    # href: javascript:__doPostBack('target','pageIndex')
                    match = re.search(r"\((.*)\)", href)
                    if not match:
                        continue
                    page_key_pass, page_index = match[1].split(",")
                    next_page = page_index.replace("'", "")
                    page_key_pass = page_key_pass.replace("'", "")
                    print('###############################################')
                    print('这是第' + str(next_page) + '页')
                    print('###############################################')
                    fake_head = self.get_fake_head(response.url)
                    post_dict = self.get_post_info(sel)
                    post_dict['__EVENTARGUMENT'] = next_page
                    post_dict['__EVENTTARGET'] = page_key_pass
                    # Remember this POST so an empty next page can be retried
                    # with the index bumped (see the empty-info_nodes branch).
                    old_message = {
                        'url': response.url,
                        'method': 'post',
                        'formdata': post_dict,
                        'headers': fake_head,
                        'dont_filter': True,
                        'callback': self.parse_list,
                        'cookies': cookies,
                        'page_key': page_key,
                    }
                    yield FormRequest(
                        url=response.url,
                        method='post',
                        formdata=post_dict,
                        headers=fake_head,
                        dont_filter=True,
                        callback=self.parse_list,
                        cookies=cookies,
                        meta={"cookie": cookies,
                              'page_key': page_key,
                              'old_message': old_message
                              }
                    )



    def get_fake_head(self, refer):
        """Return browser-imitating request headers for this site.

        :param refer: value to send as the ``Referer`` header.
        :return: dict of header name -> value.
        """
        headers = {}
        headers['Accept'] = ('text/html,application/xhtml+xml,application/xml;q=0.9,'
                             'image/avif,image/webp,image/apng,*/*;q=0.8,'
                             'application/signed-exchange;v=b3;q=0.9')
        headers['Accept-Encoding'] = 'gzip, deflate'
        headers['Accept-Language'] = 'zh-CN,zh;q=0.9'
        headers['Cache-Control'] = 'max-age=0'
        headers['Content-Type'] = 'application/x-www-form-urlencoded'
        headers['Host'] = 'www.jszb.com.cn'
        headers['Origin'] = 'http://www.jszb.com.cn'
        headers['Proxy-Connection'] = 'keep-alive'
        headers['Upgrade-Insecure-Requests'] = "1"
        headers['User-Agent'] = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                                 'AppleWebKit/537.36 (KHTML, like Gecko) '
                                 'Chrome/86.0.4240.75 Safari/537.36')
        headers['Referer'] = refer
        return headers

    def get_post_info(self, sel):
        """Collect the form fields needed to replay an ASP.NET postback.

        Gathers every named hidden/text ``<input>`` (missing value -> ""),
        and submits every named ``<select>`` as "-1".

        :param sel: ``Selector`` over the list page.
        :return: dict suitable as FormRequest formdata.
        """
        post_body = {}
        for node in sel.xpath("//input[@type='hidden' or @type='text']"):
            names = node.xpath("./@name").extract()
            if not names:
                # Unnamed inputs are never submitted by a browser; skip them
                # explicitly instead of relying on a broad except.
                continue
            values = node.xpath("./@value").extract()
            post_body[names[0]] = values[0] if values else ""

        for node in sel.xpath("//select"):
            names = node.xpath("./@name").extract()
            if names:  # guard: an unnamed <select> used to raise IndexError
                post_body[names[0]] = "-1"
        return post_body

    def get_page_key(self, html_text):
        """Extract the ASP.NET control-id prefix (the part before
        "_moreinfo") that names the list table on this page.

        :param html_text: raw HTML of the list page.
        :return: the prefix, or "" when the marker is absent.
        """
        # [^"]* instead of the original greedy .* — with .* any other
        # double-quoted attribute earlier on the same line would be
        # swallowed into the captured key.
        match = re.search(r'id="([^"]*)_moreinfo"', html_text)
        return match[1] if match else ""
