# -*- encoding: utf-8 -*-
import requests
from lxml import etree
from openpyxl import workbook

import re


# 招标公告 — tender-notice spider
class Bidding_Spiders(object):
    """Scrape 招标公告 (tender notices) from chinabidding.com into an Excel workbook.

    One worksheet row is written per equipment item found in a notice's table.
    """

    # Search endpoint.  parse_main_page() previously read a module-level
    # global ``url`` defined only in the __main__ block, which broke when the
    # class was used on its own; this default removes that hidden dependency.
    SEARCH_URL = 'https://www.chinabidding.com/search/proj.htm'

    # Companies whose notices are collected.
    COMPANIES = ['华虹半导体', '长江存储', '上海华力', '上海积塔', '合肥晶合', '中芯绍兴', '株洲中车时代']

    def __init__(self):
        # Excel workbook that accumulates the scraped rows.
        self.wb = workbook.Workbook()
        self.ws = self.wb.active
        self.ws.append(['标题', '招标项目编号', '招标日期', '项目概况', '项目名称', '项目实施地点', '设备名称', '数量', '简要技术规格', '网页链接', '所属行业'])

        # Form payload for the search POST; fullText and currentPage are
        # filled in per company / per page by parse_main_page().
        self.payload = {
            'fullText': '',
            'pubDate': '',
            'infoClassCodes': '0105',
            'normIndustry': '',
            'zoneCode': '',
            'fundSourceCodes': '',
            'poClass': 'BidNotice',
            'rangeType': '',
            'currentPage': ''
        }
        # NOTE(review): the session cookie is hard-coded and will expire;
        # the site may reject stale sessions.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36',
            'referer': 'https://www.chinabidding.com/search/proj.htm',
            'cookie': '__yjs_duid=1_f0abdf858ae3dfaf850adc425b857a641647931522280; Hm_lvt_3e8bc71035a13d1213b8be00baf17f95=1647945806,1647945809,1647945925,1647947063; JSESSIONID=B1B39D614C7687CB3F60F3092FD351FF; Hm_lpvt_3e8bc71035a13d1213b8be00baf17f95=1648009628',
            'Content-Type': 'application/x-www-form-urlencoded'
        }

    def get_data_one(self, url):
        """POST the current payload to *url*; return the page HTML, or None on a non-200 reply."""
        response = requests.post(url=url, headers=self.headers, data=self.payload)
        if response.status_code == 200:
            return response.text
        return None

    def parse_main_page(self, url=None):
        """Walk every company / result page and parse each notice numbered 1 (第1号公告).

        ``url`` defaults to SEARCH_URL; passing it explicitly is backward
        compatible with the old no-argument call.
        """
        if url is None:
            url = self.SEARCH_URL
        for company in self.COMPANIES:
            print(f'----------------------正在下载{company}--------------------')
            self.payload['fullText'] = company
            for page in range(1, 2):
                print(f'----------------------正在下载第{page}页--------------------')
                self.payload['currentPage'] = page
                page_text = self.get_data_one(url)
                if page_text is None:
                    # Non-200 reply: etree.HTML(None) would raise, so skip the page.
                    continue
                html = etree.HTML(page_text)
                # Detail-page links, industry labels and titles, in listing order.
                links = html.xpath('//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/@href')
                industries = html.xpath(
                    '//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/div/dl/dd/span[1]/strong/text()')
                titles = html.xpath('//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/h5/span/@title')
                for link, industry, title in zip(links, industries, titles):
                    try:
                        response = requests.get(url=link, headers=self.headers, data=self.payload)
                        # Titles end with 公告<number>; only the first notice is kept.
                        notice_no = int(re.findall(r'\d+', title.split('公告')[1])[0])
                        if notice_no == 1:
                            self.parse_data(response, link, industry, title)
                    except (IndexError, ValueError, requests.RequestException):
                        # Title without a trailing 公告<number>, or a failed
                        # fetch/parse — skip this notice (was a bare except).
                        continue

    def parse_data(self, response, link, industry, title):
        """Extract the header fields from a notice detail page and hand off to equipment_data()."""
        dt = etree.HTML(response.text)
        # 发布时间 is embedded as "发布时间：..." in the header span.
        dt_time = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/p/span/text()')).split('发布时间：')[1]
        # 招标项目编号
        dt_bianhao = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[7]')).split('招标项目编号:')[1]
        # 项目概况 / 项目名称 — the page only exposes 招标项目名称, so both
        # columns carry the same value (behavior kept from the original).
        dt_mingcheng = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[8]')).split('招标项目名称:')[1]
        dt_gaikuang = dt_mingcheng
        # 项目实施地点
        dt_didian = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[9]')).split('项目实施地点:')[1]

        self.equipment_data(response, title, dt_bianhao, dt_time, dt_gaikuang, dt_mingcheng, dt_didian, link,
                            industry)

        print('标题：%s' % title)
        print('招标日期：%s' % dt_time)
        print('招标项目编号：%s' % dt_bianhao)
        print('项目概况：%s' % dt_gaikuang)
        print('项目名称：%s' % dt_mingcheng)
        print('项目实施地点：%s' % dt_didian)
        print('网页链接：%s' % link)
        print('所属行业: %s' % industry)
        print('====' * 20)

    def equipment_data(self, html_data, title, dt_bianhao, dt_time, dt_gaikuang, dt_mingcheng, dt_didian, dt_url,
                       dt_hangye):
        """Parse the notice's equipment <table> and save one workbook row per device."""
        # The device table uses this exact literal markup; grab everything up
        # to the closing </table>.
        table_html = html_data.text.split(r"<table width='400px' style='border-collapse: collapse;' border='1'>")[1].split(
            '</table>')[0]
        # Drop whitespace and closing tags so each row flattens to
        # "<tr><td>col1<td>col2..." which splits cleanly on the opening tags.
        flat = table_html.replace('\n', '').replace('\t', '').replace(' ', '').replace('</tr>', '').replace('</td>', '')
        body = []  # one list of cell strings per table row
        for row in flat.split('<tr>'):
            # The fragment before the first <tr> has no cells and is skipped.
            if '<td>' in row:
                body.append(row.split('<td>'))
            else:
                print('========================')
        # body[0] is the header row; data rows follow.
        for cells in body[1:]:
            name = cells[2]
            count = re.findall(r'\d+', cells[3])[0]
            spec = cells[4]
            self.save_data(title, dt_bianhao, dt_time, dt_gaikuang, dt_mingcheng, dt_didian, name, count,
                           spec,
                           dt_url, dt_hangye)

            print(f'设备名称：{name}=====数量：{count}========规格：{spec}')
            print('======' * 10)

    def save_data(self, title, dt_bianhao, dt_time, dt_gaikuang, dt_mingcheng, dt_didian, name, count1,
                  Specifications,
                  dt_url,
                  dt_hangye):
        """Append one equipment row and persist the workbook.

        Saving after every row is deliberate: a crash mid-run still leaves the
        rows collected so far on disk.
        """
        my_list = [title, dt_bianhao, dt_time, dt_gaikuang, dt_mingcheng, dt_didian, name, count1,
                   Specifications,
                   dt_url,
                   dt_hangye]
        self.ws.append(my_list)
        self.wb.save(filename="excel__%s招标.xlsx" % self.payload['fullText'])


# 变更公告 — change-notice spider
class Change_Spiders(object):
    """Scrape 变更公告 (change notices) from chinabidding.com into an Excel workbook."""

    # Companies whose notices are collected.
    COMPANIES = ['华虹半导体', '长江存储', '上海华力', '上海积塔', '合肥晶合', '中芯绍兴', '株洲中车时代']

    def __init__(self):
        # Excel workbook that accumulates the scraped rows.
        self.wb = workbook.Workbook()
        self.ws = self.wb.active
        self.ws.append(['标题', '发布时间', '招标项目编号', '项目名称', '项目名称(英文)', '招标人', '招标机构', '招标机构代码', '招标方式', '投标报价方式', '招标结果'])

        # Proxy configuration — defined but not passed to any request below.
        # NOTE(review): dead config; wire into requests via proxies=self.p or remove.
        self.p = {
            'http': 'http://180.120.183.166:4231',
            'https': 'https://180.120.183.166:4231'
        }
        # Form payload for the search POST; fullText and currentPage are
        # filled in per company / per page.
        self.payload = {
            'fullText': '',
            'pubDate': '',
            'infoClassCodes': '0106',
            'normIndustry': '',
            'zoneCode': '',
            'fundSourceCodes': '',
            'poClass': 'BidChange',
            # Bug fix: the key was ' rangeType' (leading space), which the
            # server would see as an unknown field name.
            'rangeType': '',
            'currentPage': '1',
        }
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36',
            'referer': 'https://www.chinabidding.com/search/proj.htm',
            'cookie': '__yjs_duid=1_f0abdf858ae3dfaf850adc425b857a641647931522280; Hm_lvt_3e8bc71035a13d1213b8be00baf17f95=1647945806,1647945809,1647945925,1647947063; JSESSIONID=A6A16E2EBBCA1BDFB13A1B57F761EF53; Hm_lpvt_3e8bc71035a13d1213b8be00baf17f95=1648017224',
            'Content-Type': 'application/x-www-form-urlencoded'
        }

    def get_data_one(self, url):
        """Search every company / page at *url* and parse each notice numbered 1."""
        for company in self.COMPANIES:
            print(f'----------------------正在下载{company}--------------------')
            self.payload['fullText'] = company
            for page in range(1, 2):
                print(f'----------------------正在下载第{page}页--------------------')
                self.payload['currentPage'] = page
                response = requests.post(url, headers=self.headers, data=self.payload)
                if response.status_code == 200:
                    html_data = etree.HTML(response.text)
                    # Titles, detail links and publication times, in listing order.
                    titles = html_data.xpath('//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/h5/span[2]/@title')
                    links = html_data.xpath('//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/@href')
                    times = html_data.xpath('//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/h5/span[3]/text()')
                    for title, time, link in zip(titles, times, links):
                        try:
                            # Titles end with 公告<number>; only the first notice is kept.
                            suffix = title.split('公告')[1]
                            number = int(re.findall(r'\d+', suffix)[0])
                            if number == 1:
                                self.parse_data(title, time, link)
                        except (IndexError, ValueError, requests.RequestException):
                            # Title without a trailing 公告<number>, or a failed
                            # fetch — skip this notice (was a bare except).
                            continue

    def parse_data(self, title, time, link):
        """Fetch a notice detail page, parse its key:value body and save a row."""
        response = requests.post(link, headers=self.headers, data=self.payload).text
        # The notice body sits inside <section class="text">...</section>,
        # with the useful part ending at the first </div>.
        body = response.split('<section class="text">')[1]
        body = ''.join(body.split('</section>')).strip().split('</div>')[0]
        body = body.replace('\n', '').replace('\r', '').replace(' ', '')
        # Each <br/>-separated line looks like "键：值".
        dict_data = {}
        try:
            for line in body.split('<br/>'):
                if line:
                    key = line.split('：')[0]
                    value = line.split('：')[1]
                    dict_data[key] = value
            print(dict_data['招标项目编号'])
            print(dict_data['项目名称'])
            print(dict_data['项目名称(英文)'])
            print(dict_data['招标人'])
            print(dict_data['招标机构'])
            print(dict_data['招标机构代码'])
            print(dict_data['招标方式'])
            print(dict_data['投标报价方式'])
            print(dict_data['招标结果'])
            self.save_data(title, time.split('发布时间：')[1], dict_data['招标项目编号'], dict_data['项目名称'], dict_data['项目名称(英文)'],
                           dict_data['招标人'],
                           dict_data['招标机构'], dict_data['招标机构代码'], dict_data['招标方式'], dict_data['投标报价方式'],
                           dict_data['招标结果'])
        except (IndexError, KeyError):
            # Malformed line or missing field: save a stub row carrying the
            # link so the notice can be inspected manually.
            self.save_data(title, link, '', '', '', '', '', '', '', '', '')
            print(link)
        print('=======' * 15)

    def save_data(self, title, time, bh, mc, mc_eg, zbr, jg, daima, fangshi, toubiao, jieguo):
        """Append one row and persist the workbook (saved per row so partial runs keep data)."""
        my_list = [title, time, bh, mc, mc_eg, zbr, jg, daima, fangshi, toubiao, jieguo]
        self.ws.append(my_list)
        self.wb.save(filename="excel__%s招标变更.xlsx" % self.payload['fullText'])


# 评标公告 — bid-evaluation-notice spider
class TB_Spiders(object):
    """Scrape 评标公告 (bid-evaluation notices) from chinabidding.com.

    Parses each notice's key:value body and prints the candidate-winner
    table; results are printed rather than saved (the workbook created in
    __init__ is kept for interface compatibility).
    """

    # Companies whose notices are collected.
    COMPANIES = ['中芯绍兴', '株洲中车时代']

    def __init__(self):
        # Excel workbook (header written, but no rows are appended by this class).
        self.wb = workbook.Workbook()
        self.ws = self.wb.active
        self.ws.append(['标题', '招标项目编号', '招标日期', '项目概况', '项目名称', '项目实施地点', '设备名称', '数量', '简要技术规格', '网页链接', '所属行业'])

        # Form payload; fullText and currentPage are overwritten per company/page.
        self.payload = {
            'fullText': '长江存储',
            'pubDate': '',
            'infoClassCodes': '0107',
            'normIndustry': '',
            'zoneCode': '',
            'fundSourceCodes': '',
            'poClass': 'BidResult',
            'rangeType': '',
            'currentPage': '1',

        }
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.82 Safari/537.36',
            'referer': 'https://www.chinabidding.com/search/proj.htm',
            'cookie': '__yjs_duid=1_f0abdf858ae3dfaf850adc425b857a641647931522280; Hm_lvt_3e8bc71035a13d1213b8be00baf17f95=1647945806,1647945809,1647945925,1647947063; yjs_use_ob=0; JSESSIONID=436B448AE1542E7A77799634EAED8D99; Hm_lpvt_3e8bc71035a13d1213b8be00baf17f95=1648034655',
            'Content-Type': 'application/x-www-form-urlencoded'
        }

    def get_data_one(self, url):
        """Search every company / page at *url* and parse each notice numbered 1."""
        # (The original issued an extra unconditional POST here whose response
        # was discarded — removed as dead work.)
        for company in self.COMPANIES:
            print(f'----------------------正在下载{company}--------------------')
            self.payload['fullText'] = company
            for page in range(1, 2):
                print(f'----------------------正在下载第{page}页--------------------')
                self.payload['currentPage'] = page
                response = requests.post(url, headers=self.headers, data=self.payload)
                if response.status_code == 200:
                    html_data = etree.HTML(response.text)
                    # Titles, detail links and publication times, in listing order.
                    titles = html_data.xpath('//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/h5/span/@title')
                    links = html_data.xpath('//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/@href')
                    times = html_data.xpath('//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/h5/span[3]/text()')
                    for title, time, link in zip(titles, times, links):
                        try:
                            # Titles end with 公告<number>; only the first notice is kept.
                            suffix = title.split('公告')[1]
                            number = int(re.findall(r'\d+', suffix)[0])
                            if number == 1:
                                self.parse_data(title, time, link)
                        except (IndexError, ValueError, KeyError, requests.RequestException):
                            # Malformed title/body or failed fetch — skip this
                            # notice (was a bare except).
                            continue

    def parse_data(self, title, time, link):
        """Fetch a notice detail page; print the winner field or dump the candidate table."""
        response = requests.post(link, headers=self.headers, data=self.payload).text
        # The body sits between the article div and the login-prompt comment.
        article = response.split('<div class="as-article-body table-article">')[1]
        article = article.split('<!--div class="login-prompt">')[0].strip()
        article = article.replace('\n', '').replace('\r', '').replace(' ', '')
        dict_data = {}
        print(title)
        for line in article.split('<br/>'):
            # Lines look like "键：值"; skip empties and fragments without a
            # colon (the latter used to raise an uncaught IndexError).
            if '：' in line:
                key = line.split('：')[0]
                value = line.split('：')[1]
                dict_data[key] = value
        # 中标候选人名单 == 无中标人 means no winner; otherwise the page
        # carries a table of candidates.  Missing key raises KeyError, which
        # the caller treats as "skip this notice".
        if dict_data['中标候选人名单'] == '无中标人':
            print(dict_data['中标候选人名单'])
            print('=====' * 20)
        else:
            self.table(response)

    def table(self, html_data):
        """Print the candidate rows of the notice's fixed-markup <table>."""
        # Isolate the table body, then strip whitespace and closing tags so
        # rows can be split on the remaining opening-tag markers.
        raw = html_data.split(r"<table width='400px' style='border-collapse: collapse;' border='1'>")[1].split(
            '</table>')[0]
        flat = raw.replace('\n', '').replace('\t', '').replace(' ', '').replace('</tr>', '').replace('</td>', '')
        flat = flat.replace("<tr><thwidth='30px'>", '').replace(
            "<tdalign='center'style='word-wrap:break-word;word-break:break-all;'", '')
        rows = flat.split("</th><tr><tdalign='center'>")[1].split("<tr><tdalign='center'>")
        print(rows)
        print('=====' * 20)



# 中标公告 — award-notice spider
class ZB_Spaders():
    """Scrape 中标公告 (award notices) from chinabidding.com into an Excel workbook.

    NOTE(review): the class name keeps the original spelling (Spaders) and the
    output filename keeps "招标变更" so existing callers/outputs are unchanged.
    """

    # Companies whose notices are collected.
    COMPANIES = ['华虹半导体', '长江存储', '上海华力', '上海积塔', '合肥晶合', '中芯绍兴', '株洲中车时代']

    def __init__(self):
        # Excel workbook that accumulates the scraped rows (14 columns).
        self.wb = workbook.Workbook()
        self.ws = self.wb.active
        self.ws.append(['标题', '时间', '项目名称', '招标项目编号', '招标范围', '招标机构', '招标人', '开标时间', '公示时间', '评标公示截至时间', '中标人', '制造商', '制造国家或地区', '所属行业'])
        # Proxy configuration — defined but not passed to any request below.
        self.p = {
            'http': 'http://180.120.183.166:4231',
            'https': 'https://180.120.183.166:4231'
        }
        # Form payload; fullText and currentPage are overwritten per company/page.
        # NOTE(review): infoClassCodes/poClass look copied from the change-notice
        # spider ('0106'/'BidChange') — confirm the right codes for award notices.
        self.payload = {
            'fullText': '',
            'pubDate': '',
            'infoClassCodes': '0106',
            'normIndustry': '',
            'zoneCode': '',
            'fundSourceCodes': '',
            'poClass': 'BidChange',
            # Bug fix: the key was ' rangeType' (leading space).
            'rangeType': '',
            'currentPage': '1',
        }
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36',
            'referer': 'https://www.chinabidding.com/search/proj.htm',
            'cookie': '__yjs_duid=1_f0abdf858ae3dfaf850adc425b857a641647931522280; Hm_lvt_3e8bc71035a13d1213b8be00baf17f95=1647945806,1647945809,1647945925,1647947063; JSESSIONID=A6A16E2EBBCA1BDFB13A1B57F761EF53; Hm_lpvt_3e8bc71035a13d1213b8be00baf17f95=1648017224',
            'Content-Type': 'application/x-www-form-urlencoded'
        }

    def get_data_one(self, url):
        """Search every company / page at *url* and parse each notice numbered 1."""
        for company in self.COMPANIES:
            print(f'----------------------正在下载{company}--------------------')
            self.payload['fullText'] = company
            for page in range(1, 2):
                print(f'----------------------正在下载第{page}页--------------------')
                self.payload['currentPage'] = page
                response = requests.post(url, headers=self.headers, data=self.payload)
                if response.status_code == 200:
                    html_data = etree.HTML(response.text)
                    # Titles, detail links, publication times and industry
                    # labels, in listing order.  The industry xpath used to
                    # select only li[1]; the index is dropped so the list
                    # lines up with the others in the zip below.
                    titles = html_data.xpath('//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/h5/span[2]/@title')
                    links = html_data.xpath('//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/@href')
                    times = html_data.xpath('//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/h5/span[3]/text()')
                    industries = html_data.xpath('//*[@id="lab-show"]/div[2]/div[2]/div/ul/li/a/div/dl/dd/span[1]/strong/text()')
                    for title, time, link, industry in zip(titles, times, links, industries):
                        try:
                            # Titles end with 公告<number>; only the first notice is kept.
                            suffix = title.split('公告')[1]
                            number = int(re.findall(r'\d+', suffix)[0])
                            if number == 1:
                                # Bug fix: parse_data requires the industry
                                # argument; the original 3-argument call raised
                                # TypeError, silently swallowed by a bare
                                # except, so nothing was ever saved.
                                self.parse_data(title, time, link, industry)
                        except (IndexError, ValueError, requests.RequestException):
                            # Malformed title or failed fetch/parse — skip
                            # this notice (was a bare except).
                            continue

    def parse_data(self, title, time, link, l_hangye):
        """Fetch an award-notice detail page, extract its fields and save one row."""
        response = requests.post(link, headers=self.headers, data=self.payload).text
        dt = etree.HTML(response)
        # 标题 — the listing title is used as-is.  (The original split it on
        # '发布时间：', a copy-paste from the time line that always raised.)
        dt_title = title
        # 时间
        dt_time = ''.join(time).split('发布时间：')[1]
        # 项目名称
        dt_xiangmumingcheng = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[2]')).split('项目名称：')[1]
        # 招标项目编号
        dt_xiangmubianhao = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[2]')).split('项目编号：')[1]
        # 招标范围
        dt_fanwei = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[3]')).split('招标范围：')[1]
        # 招标机构
        dt_zhaobiaojigou = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[4]')).split('招标机构：')[1]
        # 招标人
        dt_zhaobiaoren = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[5]')).split('招标人：')[1]
        # 开标时间
        dt_kaibiao = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[6]')).split('开标时间：')[1]
        # 公示时间
        dt_kaishi = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[7]')).split('公示时间：')[1]
        # 评标公示截至时间
        dt_jiezhi = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[8]')).split('中标结果公示时间：')[1]
        # 中标人
        dt_zhongbiaoren = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[9]')).split('中标人：')[1]
        # 制造商
        dt_zhizao = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/table/tbody/tr[2]/td[3]/text()')).split('制造商：')[1]
        # 制造国家或地区
        dt_guojiadiqu = ''.join(dt.xpath('//*[@id="lab-show"]/div[2]/div[1]/div/div[2]/text()[11]')).split('制造国家或地区：')[1]
        # 所属行业
        dt_hangye = ''.join(l_hangye)
        # Save exactly one row.  (The original zipped 14 strings into 13 loop
        # variables — a guaranteed ValueError that also iterated per character.)
        self.save_data(dt_title, dt_time, dt_xiangmumingcheng, dt_xiangmubianhao, dt_fanwei, dt_zhaobiaojigou,
                       dt_zhaobiaoren, dt_kaibiao, dt_kaishi, dt_jiezhi, dt_zhongbiaoren, dt_zhizao, dt_guojiadiqu,
                       dt_hangye)

    def save_data(self, title, time, bh, mc, mc_eg, zbr, jg, daima, fangshi, toubiao, jieguo, a, b, hangye=''):
        """Append one row and persist the workbook.

        ``hangye`` (所属行业) is new with a default so the old 13-argument
        call still works; with it the row matches the 14-column header.
        """
        my_list = [title, time, bh, mc, mc_eg, zbr, jg, daima, fangshi, toubiao, jieguo, a, b, hangye]
        self.ws.append(my_list)
        self.wb.save(filename="excel__%s招标变更.xlsx" % self.payload['fullText'])

if __name__ == '__main__':
    # Search endpoint shared by every spider.
    url = "https://www.chinabidding.com/search/proj.htm"

    # Only the award-notice spider is enabled; uncomment a line below to run
    # one of the others instead.
    # Bidding_Spiders().parse_main_page()
    # Change_Spiders().get_data_one(url)
    # TB_Spiders().get_data_one(url)
    spider = ZB_Spaders()
    spider.get_data_one(url)