from commonresources.spiders.basespider import  BaseSpider
from scrapy.selector import Selector
from spidertools.utils.xpath_utils import get_alltext
from scrapy.http import Request,FormRequest
import re
import requests
import time


class YangZhouShiGongChengJianSheXinXiWangSpider(BaseSpider):
    """Spider for 扬州市工程建设信息网 (Yangzhou Engineering Construction
    Information Network), http://www.yzcetc.com/Yzcetc/default.aspx

    Flow:
      * ``parse``       — read the left-hand category menu and yield one
                          list request per announcement category.
      * ``parse_list``  — extract announcement rows, yield a detail request
                          per row, and follow ASP.NET ``__doPostBack``
                          pagination via a POST of the page's hidden fields.
      * ``parse_info``  — detail callback (defined outside this file,
                          presumably on BaseSpider — TODO confirm).
    """
    name = 'YangZhouShiGongChengJianSheXinXiWang'
    name_zh = '扬州市工程建设信息网'
    province = "江苏"
    city = '扬州'

    start_urls = ['http://www.yzcetc.com/yzcetc/YW_Info/ZaoBiaoReport/MoreReportList_YZ_New.aspx?CategoryNum=003']

    def __init__(self, full_dose=False):
        super(YangZhouShiGongChengJianSheXinXiWangSpider, self).__init__(full_dose)

    def parse(self, response):
        """Parse the category menu and yield one list request per category.

        :param response: menu page response.
        """
        sel = Selector(response)
        for tag in sel.xpath("//td[@class='leftmenu']/a"):
            href = tag.xpath("./@href").extract()[0]
            announcement_type = tag.xpath("./font/text()").extract()[0]
            # BUGFIX: the original hard-coded a single category URL and broke
            # out of the loop after the first <a> (debug leftovers — the
            # intended expression was present but commented out), so only one
            # category was ever crawled.  Follow every category link instead.
            full_href = "http://www.yzcetc.com/" + href
            yield Request(full_href, callback=self.parse_list,
                          meta={'announcement_type': announcement_type})

    def parse_list(self, response):
        """Parse one announcement list page.

        Yields a ``Request`` to the detail page for every 4-column row and,
        unless the incremental cut-off fires, a ``FormRequest`` that POSTs
        the ASP.NET postback fields to fetch the next list page.

        :param response: list page response; ``meta['announcement_type']``
            carries the category label set by :meth:`parse`.
        """
        sel = Selector(response)
        # BUGFIX: was hard-coded to "" with the meta lookup commented out,
        # discarding the category label passed by parse().
        announcement_type = response.meta.get("announcement_type", "")
        match = re.search(r"Tdname\s*=\s*'(.*)'", response.text)
        if not match:
            return
        page_id = match[1]

        need_break = False
        for row in sel.xpath('//td[@id="%s_tdcontent"]//tr' % page_id):
            td_nodes = row.xpath("./td")
            # Only 4-column rows are announcement entries; skip any other row
            # (the original fell through with stale/undefined locals).
            if len(td_nodes) != 4:
                continue

            onclick_str = td_nodes[1].xpath("./a/@onclick").extract()[0]
            announcement_title = td_nodes[1].xpath("./a/text()").extract()[0]
            project_type = td_nodes[2].xpath('./text()').extract()[0]
            release_time = td_nodes[3].xpath('./text()').extract()[0]
            # Strip all embedded whitespace from the scraped cells.
            for ch in ("\r", "\n", " ", "\t"):
                project_type = project_type.replace(ch, "")
                release_time = release_time.replace(ch, "")

            # Incremental-crawl cut-off, disabled in the original — left
            # commented pending confirmation that BaseSpider provides it:
            # need_break = self.check_if_need_break(release_time)
            # if need_break:
            #     break

            # BUGFIX: an unconditional `break` made everything below
            # unreachable, so no detail request was ever yielded.  The
            # original char class also excluded the full-width quote “
            # instead of the ASCII ", leaving a stray quote in the capture.
            click_match = re.search(r'\("([^"]*?)",', onclick_str)
            if click_match:
                url_parts = response.url.split('/')
                info_url = "/".join(url_parts[:-1] + [click_match[1]])
                yield Request(info_url, callback=self.parse_info,
                              meta={"project_type": project_type,
                                    'release_time': release_time,
                                    'announcement_title': announcement_title,
                                    'announcement_type': announcement_type})

        if need_break:
            return
        for a_link in sel.xpath("//div[@id='%s_Pager']//a" % page_id):
            if a_link.xpath("./text()").extract()[0] != "下一页":
                continue
            href = a_link.xpath('./@href').extract()[0]
            # href looks like javascript:__doPostBack('key','page').
            pager_match = re.search(r"\((.*)\)", href)
            if not pager_match:
                continue
            page_key, page_index = pager_match[1].split(",")
            next_page = page_index.replace("'", "")
            page_key = page_key.replace("'", "")

            post_dict = self.get_post_info(sel)
            post_dict['__EVENTARGUMENT'] = next_page
            post_dict['__EVENTTARGET'] = page_key
            if 'MoreInfoList1$drpType' in post_dict:
                post_dict['MoreInfoList1$drpType'] = "所有项目"
            post_dict['MoreZhongBiaoGSList1$jpdDi'] = '07'

            yield FormRequest(
                url=response.url,
                method='post',
                formdata=post_dict,
                headers=self.get_fake_head(response.url),
                # The same URL is POSTed repeatedly with different form data;
                # dont_filter stops the dupe filter from dropping later pages.
                dont_filter=True,
                callback=self.parse_list,
            )

    def get_fake_head(self, refer=None):
        """Return browser-like request headers for the postback POST.

        :param refer: optional value for the ``Referer`` header.
        :returns: dict of HTTP headers.
        """
        fake_head = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Proxy-Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': "1",
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36',
            'Origin': 'http://www.yzcetc.com',
        }
        if refer is not None:
            fake_head['Referer'] = refer
        return fake_head

    def get_post_info(self, sel):
        """Collect the ASP.NET form state needed to replay a postback.

        Gathers every ``hidden``/``text`` input's name/value pair (missing
        values default to ``""``; unnamed inputs are skipped) and maps every
        ``<select>`` name to ``"-1"``.

        :param sel: ``Selector`` for the current list page.
        :returns: dict suitable as ``FormRequest`` formdata.
        """
        post_body = {}
        for node in sel.xpath("//input[@type='hidden' or @type='text']"):
            name_values = node.xpath("./@name").extract()
            if not name_values:
                # Inputs without a name attribute are not submitted.
                continue
            value_values = node.xpath("./@value").extract()
            post_body[name_values[0]] = value_values[0] if value_values else ""

        for node in sel.xpath("//select"):
            name = node.xpath("./@name").extract()[0]
            post_body[name] = "-1"

        return post_body
