import requests
import os
import sys
import json
import datetime
import re
import logging
import time
pwd = os.getcwd()
sys.path.append(pwd)
# from proxy import Proxy
from lxml import etree

host = '192.168.50.75:5001'
def post_caigou_data(data):
    """POST one parsed procurement record to the internal collection API.

    Args:
        data: dict of record fields (kind, title, date_at, buyer, ...).
    """
    # `host` carries no scheme, so the explicit 'http://' prefix is required --
    # requests.post('192.168.50.75:5001/...') raises MissingSchema.
    # A timeout keeps an unresponsive server from hanging the crawl forever.
    r = requests.post('http://' + host + '/api/v1/cg/', data=data, timeout=10)
    print(r)

class ChinaSourcing(object):
    """Scraper for procurement notices on search.ccgp.gov.cn.

    For a given keyword it walks every result page of the search, parses
    each notice (title, publish time, buyer, agent, category, area, ...)
    and appends the records as comma-separated lines to
    ``./<date>/<keyword>.csv``.
    """
    url = 'http://search.ccgp.gov.cn/bxsearch'
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    }
    base_dir = './'
    # "size: N," inside an inline script of the result page carries the total
    # page count. Raw string fixes the invalid "\d" escape of the original.
    _SIZE_PATTERN = re.compile(r'size: (\d+),')

    def __init__(self, keyword):
        """Prepare the search parameters and the output CSV for *keyword*."""
        # self.proxy = Proxy().get_proxy()
        self.yestoday = self.get_yestoday().replace(':', '')
        save_dir = self.base_dir + self.yestoday
        self.folder_exist(save_dir)
        self.save_file = save_dir + '/' + keyword + '.csv'
        # NOTE(review): start/end_time get the colon-stripped date here, but the
        # site format (and request_yestoday's override) uses "YYYY:MM:DD" --
        # harmless today because request_yestoday always overwrites both.
        self.params = {
            'searchtype': '',  # announcement type
            'page_index': '1',  # current page number
            'kw': keyword,  # search keyword
            'start_time': self.yestoday,  # range start
            'end_time': self.yestoday,  # range end
            'timeType': '6',  # 0: today, 1: last 3 days, 2: last week, 3: last month, 4: last 3 months, 5: last 6 months, 6: explicit range
            'displayZone': '',
            'zoneId': '',
            'pppStatus': '0',
            'agentName': '',
            'bidSort': '0',
            'buyerName': '',
            'projectId': '',
            'pinMu': '0',
            'bidType': '0',
            'dbselect': 'bidx',
        }

    # Yesterday's date as "YYYY:MM:DD" (the colon format the site expects).
    def get_yestoday(self):
        yesterday = datetime.datetime.today() + datetime.timedelta(-1)
        return yesterday.strftime("%Y:%m:%d")

    # Fetch the current result page and return its HTML text.
    def request_yestoday(self):
        """Request one search-result page, retrying on transient failures.

        Retries up to 5 times on connection errors and non-200 responses
        (the original bare ``except:`` + unbounded ``while`` could swallow
        KeyboardInterrupt and loop forever); re-raises the last network
        error when all attempts fail.
        """
        # NOTE(review): these hard-coded dates override the "yesterday" range
        # set in __init__ -- apparently a one-off 2018 backfill; confirm
        # before reusing this scraper for daily runs.
        self.params['start_time'] = '2018:01:01'
        self.params['end_time'] = '2019:01:01'
        last_error = None
        for _ in range(5):
            try:
                response = requests.get(url=self.url, params=self.params, headers=self.headers, timeout=10)
            except requests.RequestException as exc:
                last_error = exc
                time.sleep(1)
                continue
            if response.status_code == 200:
                return response.text
            time.sleep(1)
        if last_error is not None:
            raise last_error
        raise RuntimeError('search.ccgp.gov.cn kept returning a non-200 status')

    # Total number of result pages.
    def get_max_page(self, response):
        """Return the page count parsed from the "size: N," marker in *response*.

        Raises ValueError (instead of an opaque AttributeError) when the
        marker is missing, e.g. because the site layout changed.
        """
        match = self._SIZE_PATTERN.search(response)
        if match is None:
            raise ValueError('page count ("size: N,") not found in response')
        return int(match.group(1))

    @staticmethod
    def _normalize(text):
        """Drop line breaks/tabs/spaces and swap ASCII commas for full-width
        ones so a field can never break the comma-separated output line."""
        return (text.replace('\n', '').replace('\r', '')
                    .replace('\t', '').replace(' ', '').replace(',', '，'))

    # Parse the notices of one result page.
    def get_info(self, response, current_page):
        """Return the CSV lines (one per notice) parsed from *response*."""
        html = etree.HTML(response)
        # One <li> per notice in the result listing.
        list_content = html.xpath('/html/body/div[5]/div[2]/div/div/div[1]/ul/li')
        content = ''
        for current_content in list_content:
            url = current_content.xpath('a/@href')[0]  # link to the notice
            title_info = current_content.xpath('a')[0]
            title = title_info.xpath('string(.)').strip().replace(' ', '').replace(',', '，')  # title
            print('第'+ str(current_page) + '页,' + title)
            desc = self._normalize(current_content.xpath('p/text()')[0])  # summary
            # "date | buyer | agent" packed into a single text node.
            date_person_company = current_content.xpath('span/text()[1]')[0].replace('\n', '').replace('\r', '').replace('\t', '').strip()
            parts = date_person_company.split('|')
            date_str = parts[0].strip().replace('.', '-')
            date_at = datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')  # publish time
            buyer = parts[1].strip().replace(',', '，')[4:]  # [4:] strips the "采购人：" label
            agent = parts[2].strip().replace(',', '，')[5:]  # [5:] strips the "代理机构：" label
            kind = current_content.xpath('span/strong[2]/text()')  # service type (may be absent)
            kind = self._normalize(kind[0]) if kind else ''
            category = self._normalize(current_content.xpath('span/strong[1]/text()')[0])  # announcement type
            area = current_content.xpath('span/a/text()')  # province (may be absent)
            area = self._normalize(area[0]) if area else ''
            data = {'kind': kind, 'title': title, 'date_at': date_at, 'buyer': buyer, 'agent': agent, 'category': category, 'area': area, 'desc': desc, 'url': url, 'key': self.params['kw']}
            # post_caigou_data(data)  # push the record to the API
            content += ', '.join([kind, title, date_str, buyer, agent, category, area, desc, url, self.params['kw']]) + '\n'
        return content

    # Crawl every result page.
    def get_all_info(self):
        """Fetch all pages for the keyword and append the records to the CSV."""
        response = self.request_yestoday()
        max_page = self.get_max_page(response)
        for current_page in range(1, max_page + 1):
            self.params['page_index'] = str(current_page)
            response = self.request_yestoday()
            self.write_file(self.get_info(response, current_page))

    # Append content to the output file.
    def write_file(self, content):
        """Append *content* to the per-keyword CSV file."""
        # Explicit UTF-8: the records are Chinese text and must not depend on
        # the platform default encoding (cp936/latin-1 would corrupt or crash).
        with open(self.save_file, 'a', encoding='utf-8') as f:
            f.write(content)

    def folder_exist(self, dir_path):
        '''
        1. Purpose: ensure the directory exists, creating it if needed.
        2. Args: dir_path: directory path.
        3. Returns: None
        '''
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(dir_path, exist_ok=True)

if __name__ == '__main__':
    # Crawl each keyword in turn; every keyword produces its own CSV file.
    keywords = ['智慧 大脑', '交通']
    for current_keyword in keywords:
        ChinaSourcing(current_keyword).get_all_info()
    
