import requests
import os
import sys
import json
import datetime
import re
import logging
import time
import math
pwd = os.getcwd()
sys.path.append(pwd)
# from proxy import Proxy
from lxml import etree
from urllib.parse import urlencode
from requests.exceptions import ConnectTimeout
# gov database model (project-local package)
from sql import sql_gov_model

class GovTmri(object):
    """Scraper for announcement search results on www.tmri.cn.

    Given a search keyword, POSTs the (gb2312-encoded) search form, walks
    every result page, and parses each record's url / title / summary /
    announcement type / publish time.  Database persistence is currently
    disabled (see the commented-out lines in ``page_parse``).
    """
    # Search endpoint; the site expects a gb2312 form-encoded POST body.
    url = 'http://www.tmri.cn/search.aspx'
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    # Root directory under which per-day result folders are created.
    base_dir = './'

    def __init__(self, keyword):
        """Prepare the search payload and output path for one keyword.

        :param keyword: search term (str) to query on the site.
        """
        # self.proxy = Proxy().get_proxy()
        self.keyword = keyword
        # NOTE(review): the date format is '%Y-%m-%d', so the ':' replace is
        # a no-op; kept for byte-compatibility with the original paths.
        self.yestoday = self.get_yestoday().replace(':', '-')
        save_dir = self.base_dir + self.yestoday
        # self.folder_exist(save_dir)
        self.save_file = save_dir + '/' + keyword + '.csv'
        self.data = {
            'key': keyword,
            'mode': '0',
            'toPage': '1',
        }
        # The site only understands gb2312-encoded form data.
        self.data_gb2312 = urlencode(self.data, encoding='gb2312')

    # NOTE(review): method name keeps the historical typo ("yestoday" caller
    # in __init__); renaming would break the public interface.
    def get_yestoday(self):
        """Return yesterday's date formatted as 'YYYY-MM-DD'."""
        yesterday = datetime.datetime.today() + datetime.timedelta(-1)
        return datetime.datetime.strftime(yesterday, "%Y-%m-%d")

    def _post_search(self):
        """POST the current search payload once; may raise ConnectTimeout."""
        return requests.post(url=self.url, headers=self.headers, data=self.data_gb2312, timeout=10)

    def _post_with_retry(self, max_retries=5):
        """POST the search form, retrying on connect timeouts.

        :param max_retries: maximum number of attempts before giving up.
        :return: a requests.Response on success, or None if every attempt
            timed out.
        """
        # FIX: the original retry path POSTed the search payload to
        # www.baidu.com, forged response.status_code = 404, and then spun in
        # an unbounded while-loop; replaced with a bounded retry.
        for _ in range(max_retries):
            try:
                return self._post_search()
            except ConnectTimeout:
                print('请求超时1')
        return None

    def request(self):
        """Fetch the first result page, then iterate and parse every page."""
        response = self._post_with_retry()
        if response is None:
            # All attempts timed out; nothing to parse.
            return
        response.encoding = 'gb2312'
        self.response = response.text
        # with open('trim.html', 'w') as f:
        #     f.write(self.response)
        max_page = self.get_max_page()
        current_page = 1
        while current_page <= max_page:
            self.data['toPage'] = current_page
            self.data_gb2312 = urlencode(self.data, encoding='gb2312')
            try:
                response = self._post_search()
                response.encoding = 'gb2312'
                self.response = response.text
                self.page_parse()
                current_page += 1
                # Be polite to the server between page fetches.
                time.sleep(5)
            except ConnectTimeout:
                # Page counter is not advanced, so the same page is retried.
                print('请求超时2')
                continue

    def get_max_page(self):
        """Extract the total page count from the last fetched HTML.

        :return: page count as int, or 0 when the marker text is absent
            (e.g. the search returned an error page).
        """
        # FIX: raw string — '\d' in a plain literal is a DeprecationWarning
        # and a future SyntaxError.
        match = re.search(r'共(\d+)页', self.response)
        if match is None:
            print(self.data['key'], '搜索出错')
            return 0
        return int(match.group(1))

    @staticmethod
    def _clean_text(text):
        """Normalize scraped text: trim, drop spaces/newlines, and replace
        ASCII commas with full-width ones so the value is CSV-safe."""
        return text.strip().replace(' ', '').replace(',', '，').replace('\n', '')

    def page_parse(self):
        """Parse every announcement record on the current result page.

        Extracts url, title, summary, announcement type and publish time.
        Malformed records (missing link/type/date) are skipped instead of
        crashing the whole crawl (original code raised IndexError /
        AttributeError on them).
        """
        html = etree.HTML(self.response)
        list_content = html.xpath('//*[@id="searchpanel"]/div[@class="searchbody"]')  # all records on this page
        if len(list_content) == 0:
            # Empty page: nothing to parse.
            return None
        for current_content in list_content:
            hrefs = current_content.xpath('div[@class="searchbody_title"]/a/@href')
            titles = current_content.xpath('div[@class="searchbody_title"]/a')
            descs = current_content.xpath('div[@class="searchbody_content"]')
            types = current_content.xpath('div[@class="searchbody_command"]/a/text()')
            others = current_content.xpath('div[@class="searchbody_command"]/text()')
            if not hrefs or not titles or not descs or not types or len(others) < 2:
                continue  # malformed record — skip rather than crash

            url = 'http://www.tmri.cn/' + hrefs[0]  # detail-page link
            title = self._clean_text(titles[0].xpath('string(.)'))  # announcement title
            desc = self._clean_text(descs[0].xpath('string(.)'))  # summary text
            str_type = types[0]  # announcement type

            # others[1] looks like: "发布时间：2019-12-17 18:10  网页大小：26K  浏览次数：701"
            time_match = re.search(r'(\d{4}-\d{2}-\d{2})', others[1])
            if time_match is None:
                continue  # no publish date — skip the record
            str_time = datetime.datetime.strptime(time_match.group(1), '%Y-%m-%d')  # publish time

            # print(url, title, desc, str_type, str_time)
            # gov = sql_gov_model.Gov(website='公安部交通管理科学研究所', keyword=self.keyword, title=title, url=url, str_time=str_time, str_type=str_type, desc=desc)
            # sql_gov_model.insert_gov(gov)

    def write_file(self, content):
        """Append *content* to this keyword's CSV file.

        :param content: string to append.
        """
        # FIX: pin the encoding — the default is platform-dependent and can
        # fail on the Chinese text this scraper produces.
        with open(self.save_file, 'a', encoding='utf-8') as f:
            f.write(content)

    def folder_exist(self, dir_path):
        """Create *dir_path* (and parents) if it does not already exist.

        :param dir_path: directory path to ensure.
        :return: None
        """
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(dir_path, exist_ok=True)

if __name__ == '__main__':
    # Crawl the site once per keyword of interest, sequentially.
    for kw in ('人工智能', '人才', '职称', '项目', '专项', '指南', '申报', '奖励'):
        GovTmri(kw).request()
