# -*- coding:utf-8 -*-
from scrapy.selector import Selector
from spidertools.utils.xpath_utils import get_alltext,get_all_tables
from spidertools.utils.text_utils import replace_punctuation
from spidertools.utils.snippets import combine_two_dict
from info_fsm import InfoMachine
import sys
from pprint import pprint
import requests
from table_info_extract.extract_utils import table_info_extract_styletwo,table_info_extract_stylethree
from table_info_extract import dict_mapping_triedTree
from table_info_extract.common_table_utils import common_table_extrator
from extractors.base_extractor import BaseExtractor
import os
import json
import re
from lxml import etree
from table_info_extract.table_fsm import TableExtractMachine


class ShanDongShengJiaoTongJianSheShiChangJianGuanGongGongFuWuPingTai(BaseExtractor):
    """Extractor for the Shandong Province Transportation Construction Market
    Supervision Public Service Platform (山东省交通建设市场监管公共服务平台).

    Routes on the announcement type in ``info_dict['公告类型']`` and parses
    either tender (招标) or bid-winning (中标) announcement pages into nested
    result dicts keyed by Chinese field names.
    """

    def __init__(self, info_dict):
        super(ShanDongShengJiaoTongJianSheShiChangJianGuanGongGongFuWuPingTai, self).__init__(info_dict)

    def start_parse(self):
        """Entry point: dispatch to the parser matching the announcement type.

        Returns an empty dict when the type matches neither branch.
        """
        result = {}
        if '招标公告' in self.info_dict['公告类型']:
            result = self.parse_zhaobiao()
        elif '中标公告' in self.info_dict['公告类型']:
            result = self.parse_zhongbiao()

        return result

    def parse_zhaobiao(self):
        """Parse a tender (招标) announcement page.

        Handles three layouts:
          * eight ``ztbBidSectionRequireTable`` tables — table #2 is parsed as
            running text by the info FSM, the other seven as key/value tables;
          * a two-table page, treated as an empty page (note: the original code
            sets ``output_dict['网页状态']`` but falls through and returns
            ``None`` here — preserved as-is; TODO confirm callers expect that);
          * a generic fallback combining table extraction with the text FSM
            run over the whole ``<body>``.
        """
        self.html = self.change(self.html)
        self.sel = Selector(text=self.html)
        table_list = self.sel.xpath('//table')
        table_extractor = TableExtractMachine()
        output_dict = {}
        # Default the title so the '中止公告' membership test below cannot hit
        # an unbound name when get_title() fails (fixes a latent NameError).
        title = ''
        try:
            title = self.get_title(self.html)
        except Exception:
            pass
        table_nodes = self.sel.xpath('//*[@id="ztbBidSectionRequireTable"]')
        table_number = len(table_nodes)
        if table_number == 8:  # eight-table page layout
            table_text = table_nodes[1]  # the second table is treated as plain text
            del table_nodes[1]
            clean_texts = []
            content_root_nodes = table_text.xpath('.//tr')
            if content_root_nodes:
                texts = []
                # Collect the text of every row of the text-style table.
                for node in content_root_nodes:
                    node_text = get_alltext(node)
                    texts.append(node_text)
                # Strip invisible characters and normalise keyword spacing.
                for text in texts:
                    text = self.change(text)
                    text = replace_punctuation(text.strip())
                    text = text.replace('\xa0', '')
                    text = text.replace('\t', '')
                    text = text.replace('\u3000', '')
                    clean_texts.append(text)
            # Run the state machine over the cleaned row texts.
            machine = InfoMachine(self.base_pattern, extend_keywords=self.extend_keywords)
            output = machine.run_list(clean_texts)
            # Merge the key/value extraction of the remaining seven tables.
            for table in table_nodes:
                result = table_extractor.run_table(table)
                output_dict = combine_two_dict(output_dict, result)
            output_dict = combine_two_dict(output_dict, output)
            output_dict = self.re_juhao(output_dict)
            return output_dict
        elif len(table_list) == 2:
            output_dict['网页状态'] = '无'
        elif '中止公告' in title:  # termination notices carry no data
            pass
        else:  # every remaining page layout
            if table_nodes:
                result = table_extractor.run_table(table_nodes)
                output_dict = combine_two_dict(output_dict, result)
            info_dict = output_dict
            content_root_nodes = self.sel.xpath('//body')
            if content_root_nodes:
                texts = []
                # Collect the text of every child node.
                for node in content_root_nodes:
                    node_text = get_alltext(node)
                    texts.append(node_text)
                machine = InfoMachine(self.base_pattern, extend_keywords=self.extend_keywords)
                clean_texts = []
                # Strip invisible characters and normalise keyword spacing.
                for text in texts:
                    text = self.change(text)
                    text = replace_punctuation(text.strip())
                    text = text.replace('\xa0', '')
                    text = text.replace('\t', '')
                    text = text.replace('\u3000', '')
                    clean_texts.append(text)
                # Run the state machine over the whole cleaned page text.
                output = machine.run_list(clean_texts)
                output_dict = combine_two_dict(output, info_dict)
                output_dict = self.check_lockkey(title, output_dict)
                output_dict = self.re_juhao(output_dict)
                return output_dict

    def parse_zhongbiao(self):
        """Parse a bid-winning (中标) announcement.

        Rebuilds the result table row by row: in 3-column rows the first
        ``<td>`` holds the section (标段) name and is collected separately,
        while the remaining cells are re-wrapped into synthetic ``<tr>``
        markup.  The rebuilt rows are then fed through the table FSM once per
        detected winner table.  Returns a single dict for one section, a list
        of dicts for several sections, or ``None`` when no winner table is
        found.
        """
        table_node = self.sel.xpath('//*[@id="dynamicNoticeTable"]/tbody/tr/td/table')[0]
        tr_list = table_node.xpath('tr')
        new_tr_list = []
        output_dict = {}
        biaoduan_list = []
        # Default the title so check_lockkey() below cannot hit an unbound
        # name when get_title() fails (fixes a latent NameError).
        title = ''
        try:
            title = self.get_title(self.html)
        except Exception:
            pass
        for tr in tr_list:
            td_list = []
            td_number = tr.xpath('td')
            if len(td_number) == 3:
                i = 1
                for td in td_number:
                    if i > 1:
                        td = td.getall()[0]  # keep the raw markup of the cell
                        td_list.append(td)
                        i += 1
                    else:  # first cell: the section (标段) name
                        text = td.xpath('string(.)').extract()[0]
                        text = text.replace('\xa0', '')
                        text = ''.join(text)
                        biaoduan_list.append(text)  # accumulated section names
                        i += 1

                x = ''.join(td_list)
                first_tr = '<tr>' + x + '</tr>'
                new_tr_list.append(first_tr)

            else:
                td_message = tr.xpath('td').getall()
                x = ''.join(td_message)
                new_tr = '<tr>' + x + '</tr>'
                new_tr_list.append(new_tr)
        del new_tr_list[0]  # drop the header row
        # Count the winner tables via occurrences of the '中标单位' label.
        x = 0
        for i in new_tr_list:
            if '中标单位' in i:
                x += 1
        if x == 1:
            new_table = ''.join('%s' % id for id in new_tr_list)  # cells may be non-str
            new_table = '<table>' + new_table + '</table>'
            self.sel = Selector(text=new_table)
            table_node = self.sel.xpath('//table')[0]
            table_extractor = TableExtractMachine()
            result = table_extractor.run_table(table_node)
            output_dict = result
            output_dict['工程招标信息']['标段名称'] = biaoduan_list[0]
            output_dict = self.check_lockkey(title, output_dict)
            return output_dict

        elif x > 1:
            output_dict = []
            temp = self.cut_list(new_tr_list, 6)  # split into per-section row groups
            v = 0
            for table in temp:
                new_table = ''.join('%s' % id for id in table)  # cells may be non-str
                new_table = '<table>' + new_table + '</table>'
                self.sel = Selector(text=new_table)
                table_node = self.sel.xpath('//table')[0]
                table_extractor = TableExtractMachine()
                result = table_extractor.run_table(table_node)
                output = result
                output['工程招标信息']['标段名称'] = biaoduan_list[v]
                output = self.check_lockkey(title, output)
                output_dict.append(output)
                v += 1
            return output_dict
        else:
            print('网页中没有表格')

    def cut_list(self, listTemp, n):
        """Split *listTemp* into consecutive chunks of *n* items (the last
        chunk may be shorter); used to separate per-section winner tables."""
        return [listTemp[i:i + n] for i in range(0, len(listTemp), n)]

    def change(self, text):
        """Normalise announcement text before parsing.

        Converts full-width parentheses, forces a ':' after key labels so the
        FSM can split on them, and neutralises labels ('项目经理', '负责人',
        ...) that would otherwise be mistaken for keywords.
        """
        text = text.replace('（', '(')
        text = text.replace('）', ')')
        if '项目概况与招标范围' in text:
            text = text.replace('项目概况与招标范围', '项目简介:')
        if '项目概况' in text and '项目概况:' not in text and '项目概况：' not in text:
            text = text.replace('项目概况', '项目概况:')
        if '招标范围' in text and '招标范围:' not in text and '招标范围：' not in text:
            text = text.replace('招标范围', '招标范围:')
        # Fixed: the original read ``if '一标段:' or '二标段:' in text`` which is
        # always true (non-empty literal is truthy); str.replace() is a no-op
        # when the needle is absent, so the output is unchanged, but the
        # condition now tests what was intended.
        if '一标段:' in text or '二标段:' in text:
            text = text.replace('一标段:', '标段')
            text = text.replace('二标段:', '标段')
        # Same always-true-``or`` fix as above.
        if '其中:' in text or '其中：' in text:
            text = text.replace('其中:', '')
            text = text.replace('其中：', '')
        if '(标段号)' in text:
            text = text.replace('(标段号)', '')
        if '：施工单位：' in text:
            text = text.replace('：施工单位：', '不要:')
        if '项目经理' in text:
            text = text.replace('项目经理', '不要')
        if '负责人' in text:
            text = text.replace('负责人', '不要')
        return text

    def get_title(self, html):
        """Extract the announcement title from its fixed position in the page.

        Raises (e.g. IndexError) when the layout differs; callers treat a
        failure as "no title".
        """
        sel = Selector(text=html)
        title = sel.xpath('/html/body/table/tr/td/table/tr[1]/td/table[1]/tr[2]/td/div[1]')[0]
        title = title.xpath('string(.)').extract()[0]
        title = title.replace('\n', '')
        title = title.replace('\t', '')
        title = title.replace('\r', '')
        return title

    def check_lockkey(self, title, output_dict):
        """Backfill '项目名称' and '标段名称' from the page title when the
        extractors left them empty; missing sections are tolerated."""
        title = self.find_project_name(title)
        try:
            if '工程基本信息' not in output_dict:
                output_dict['工程基本信息'] = {}
            if '项目名称' not in output_dict['工程基本信息']:
                output_dict['工程基本信息']['项目名称'] = title
        except Exception:
            pass

        try:
            # '工程招标信息' may be absent entirely; the KeyError is swallowed.
            if '标段名称' not in output_dict['工程招标信息']:
                output_dict['工程招标信息']['标段名称'] = title
        except Exception:
            pass
        return output_dict

    def find_project_name(self, gonggao_name):
        """Derive the project name from the announcement title.

        Prefers the text up to (and including) the first '项目', then '工程';
        otherwise strips a '招标公告' marker if present.  Behaviour is
        identical to the original branch table, whose 项目/工程 cases were the
        same whether or not '招标公告' appeared in the title.
        """
        if '项目' in gonggao_name:
            return re.findall("(.*?)项目", gonggao_name)[0] + '项目'
        if '工程' in gonggao_name:
            return re.findall("(.*?)工程", gonggao_name)[0] + '工程'
        if '招标公告' in gonggao_name:
            return gonggao_name.replace('招标公告', '')
        return gonggao_name

    def re_juhao(self, output_dict):
        """Truncate over-long field values at the first full stop (。).

        Guards against FSM over-capture: if a tracked field is longer than 100
        chars and contains a '。', keep only the regex-matched text before it
        (re.findall is kept rather than str.split to preserve the original
        behaviour around embedded newlines).  Missing sections are tolerated.
        """
        for section, field in (('工程基本信息', '建筑面积'), ('工程招标信息', '标段工期')):
            try:
                value = output_dict[section][field]
                if '。' in value and len(value) > 100:
                    output_dict[section][field] = re.findall('(.*?)。', value)[0]
            except Exception:
                pass

        return output_dict

if __name__ == '__main__':
    # Smoke-test the extractor against locally saved demo HTML pages.
    for root, dirs, files in os.walk('../../../demo_html/zb'):
        for i, name in enumerate(files):
            if "init" not in name:
                print(f"\033[32m{i+1}=============={name}\033[0m")
                with open(f'../../../demo_html/zb/{name}', 'r', encoding='utf-8') as fr:
                    html = fr.read()
                # Build a fresh dict for every file: the original reused one
                # mutable dict across iterations, so every extractor instance
                # shared (and could leak) state between runs.
                info_dict = {
                    'html': html,
                    '公告类型': "中标公告",  # "中标结果公告"
                    "source_type": "1",
                    'province': "1",
                    '_id': "1",
                    "origin_url": "1",
                }
                obj = ShanDongShengJiaoTongJianSheShiChangJianGuanGongGongFuWuPingTai(info_dict)
                result = obj.start_parse()
                pprint(result)