from commonresources.spiders.basespider import  BaseSpider
from scrapy.selector import Selector
from spidertools.utils.xpath_utils import get_alltext
from scrapy.http import Request,FormRequest
import re
import requests
import time


class ChangZhouGongChengJiaoYiWangSpider(BaseSpider):
    """Spider for the Changzhou construction-engineering trading site.

    Changzhou Construction Engineering Trading Network
    (常州市建设工程交易网): http://www.czgcjy.com
    Announcement index:      http://www.czgcjy.com/czztb/
    """
    name = 'ChangZhouGongChengJiaoYiWang'
    name_zh = '常州市建设工程'
    province = "江苏"
    city = '常州'

    start_urls = ['http://www.czgcjy.com/czztb/jyxx/']

    def __init__(self, full_dose=False):
        super(ChangZhouGongChengJiaoYiWangSpider, self).__init__(full_dose)

        # Maps the id of each category <td> on the landing page to a
        # human-readable announcement type.
        self.announcement_dict = {
            'TD010001': '招标公告',
            'TD010002': '中标公示',
            'TD010003': '未入围公示',
            'TD010005': '中标人公告',
            'TD010006': "中标人公示",
        }

    def parse(self, response):
        """Parse the announcement landing page.

        Each category block is a <td id="TD...."> whose inner links point
        at "moreinfo" list pages.  For every list link we first obtain an
        ASP.NET session cookie via a plain `requests` GET (the site's
        paging POSTs require it), then schedule the Scrapy list request.

        Fixes vs. the previous revision:
        * removed the two debug ``break`` statements that limited the
          crawl to the first link of the first category;
        * removed the dead ``parse_tag`` dispatch branch — links without
          'moreinfo' were already skipped above, so it could never fire;
        * a network error while fetching the session cookie no longer
          aborts the whole parse (see _fetch_session_cookie).
        """
        sel = Selector(response)
        tags = sel.xpath('//td[starts-with(@id,"TD")]')
        current_domain = 'http://www.czgcjy.com'
        for tag in tags:
            announcement_id = tag.xpath('./@id').extract()[0]
            announcement_type = self.announcement_dict.get(announcement_id, "")

            for link in tag.xpath(".//td/a"):
                href = current_domain + link.xpath("./@href").extract()[0]
                if href[-1] != "/":
                    href += "/"
                # Only "moreinfo" list pages can be paged through below.
                if 'moreinfo' not in href:
                    continue

                text = get_alltext(link).replace("\r\n", "")
                if announcement_type == "":
                    # Unknown category id: the link text itself names the
                    # announcement type.
                    announcement_type_pass = text
                    project_type = ""
                else:
                    announcement_type_pass = announcement_type
                    project_type = text

                browser_cookie = self._fetch_session_cookie(href)

                yield Request(url=href, callback=self.parse_list,
                              meta={"announcement_type": announcement_type_pass,
                                    'project_type': project_type,
                                    "cookie": browser_cookie})

    def _fetch_session_cookie(self, url, retries=5, delay=0.3):
        """GET *url* until the server grants an ASP.NET session cookie.

        Retries up to *retries* times, sleeping *delay* seconds between
        attempts; network errors count as a failed attempt instead of
        propagating.  Returns the cookie dict, which may lack
        'ASP.NET_SessionId' if every attempt failed.
        """
        fake_heads = self.get_fake_head(url)
        # Referer / Content-Type only make sense for the paging POSTs.
        del fake_heads['Referer']
        del fake_heads['Content-Type']

        browser_cookie = {}
        for _ in range(retries):
            try:
                req = requests.get(url, headers=fake_heads, timeout=3)
            except requests.RequestException:
                time.sleep(delay)
                continue
            browser_cookie = requests.utils.dict_from_cookiejar(req.cookies)
            if 'ASP.NET_SessionId' in browser_cookie:
                break
            time.sleep(delay)
        return browser_cookie

    def parse_tag(self, response):
        """Follow a category page's "MoreInfo" link to its list page.

        NOTE(review): `parse` now schedules list pages directly, so this
        callback is only reachable if wired up elsewhere; kept for
        category pages that expose their list behind a MoreInfo link.
        """
        announcement_type = response.meta['announcement_type']
        project_type = response.meta['project_type']
        cookie = response.meta['cookie']

        sel = Selector(response)
        more_link_node = sel.xpath('//a[starts-with(@href,"MoreInfo")]')
        if more_link_node:
            href = more_link_node.xpath('./@href').extract()[0]
            # `parse` guarantees category URLs end with "/", so plain
            # concatenation yields the list-page URL.
            more_link = response.url + href
            yield Request(url=more_link, callback=self.parse_list,
                          meta={"announcement_type": announcement_type,
                                'project_type': project_type,
                                "cookie": cookie},
                          cookies=cookie)

    def parse_list(self, response):
        """Parse one page of an announcement list and page forward.

        Yields one `parse_info` request per row until
        `check_if_need_break` (BaseSpider) signals the rows are outside
        the crawl window, then replays the ASP.NET postback form to
        fetch the next page.
        """
        announcement_type = response.meta['announcement_type']
        project_type = response.meta['project_type']
        cookie = response.meta['cookie']

        # Container/pager element ids differ per list flavour.
        if 'moreinfo_zbsl.aspx' in response.url:
            key = 'MoreInfoList_zbsl1_tdcontent'
            next_page_key = 'MoreInfoList_zbsl1_Pager'
        elif 'moreinfo_slzb.aspx' in response.url:
            key = 'MoreInfoList_slzb_tdcontent'
            next_page_key = 'MoreInfoList_slzb_Pager'
        else:
            key = "MoreInfoList1_tdcontent"
            next_page_key = 'MoreInfoList1_Pager'

        sel = Selector(response)

        need_break = False
        for info_ in sel.xpath('//td[@id="%s"]//tr' % key):
            td_nodes = info_.xpath("./td")
            href = td_nodes[1].xpath("./a/@href").extract()[0]
            announcement_title = td_nodes[1].xpath("./a/text()").extract()[0]

            release_time = td_nodes[2].xpath('./text()').extract()[0]
            release_time = release_time.replace("\r", "").replace("\n", "").replace(" ", "").replace("\t", "")

            # Stop once rows fall outside the incremental-crawl window.
            need_break = self.check_if_need_break(release_time)
            if need_break:
                break

            info_url = 'http://www.czgcjy.com' + href
            yield Request(info_url, callback=self.parse_info,
                          meta={"project_type": project_type,
                                'release_time': release_time,
                                'announcement_title': announcement_title,
                                'announcement_type': announcement_type},
                          cookies=cookie)

        if need_break:
            return

        # Find the "next page" arrow (nextn.gif) in the pager and replay
        # the __doPostBack(target, argument) call its href encodes.
        for a_link in sel.xpath("//div[@id='%s']//a" % next_page_key):
            img_node = a_link.xpath("./img")
            if not img_node:
                continue
            if 'nextn.gif' not in img_node.xpath("./@src").extract()[0]:
                continue
            href = a_link.xpath('./@href').extract()[0]
            match = re.search(r"\((.*)\)", href)
            if not match:
                continue
            page_key, page_index = match.group(1).split(",")
            next_page = page_index.replace("'", "")
            page_key = page_key.replace("'", "")

            post_dict = self.get_post_info(sel)
            post_dict['__EVENTARGUMENT'] = next_page
            post_dict['__EVENTTARGET'] = page_key
            yield FormRequest(
                url=response.url,
                method='post',
                formdata=post_dict,
                headers=self.get_fake_head(response.url),
                # The same URL is POSTed for every page; dont_filter keeps
                # the dupe filter from dropping the pagination requests.
                dont_filter=True,
                callback=self.parse_list,
                cookies=cookie,
                meta={"cookie": cookie,
                      "project_type": project_type,
                      'announcement_type': announcement_type},
            )

    def get_fake_head(self, refer):
        """Return browser-like request headers with *refer* as Referer."""
        return {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Host': 'www.czgcjy.com',
            'Origin': 'http://www.czgcjy.com/',
            'Proxy-Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': "1",
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36',
            'Referer': refer,
        }

    def get_post_info(self, sel):
        """Collect the form fields needed to replay an ASP.NET postback.

        Hidden/text inputs without a name are skipped; inputs without a
        value post an empty string; every <select> is posted as "-1".
        """
        post_body = {}
        for node in sel.xpath("//input[@type='hidden' or @type='text']"):
            name_attr = node.xpath("./@name").extract()
            if not name_attr:
                continue
            value_attr = node.xpath("./@value").extract()
            post_body[name_attr[0]] = value_attr[0] if value_attr else ""

        for node in sel.xpath("//select"):
            post_body[node.xpath("./@name").extract()[0]] = "-1"

        return post_body












