import requests
import os
import sys
import json
import datetime
import re
import logging
import time
import math
pwd = os.getcwd()
sys.path.append(pwd)
# from proxy import Proxy
from lxml import etree
from urllib.parse import urlencode
from requests.exceptions import ReadTimeout
# gov database (project-local model + insert helper)
from sql import sql_gov_model

class GovMiit(object):
    """Scraper for the MIIT (Ministry of Industry and Information Technology)
    unified search platform.

    Posts a keyword search to the platform's JSON endpoint, pages through
    the results, and stores every hit through ``sql_gov_model``.
    """
    url = 'http://www.miit.gov.cn/Searchweb/searchPic'
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    }
    base_dir = './'
    # Seconds to wait for the server before a request is considered timed out.
    timeout = 10
    # How many timeouts in a row on one page before we skip it.  The original
    # loop retried a timed-out page forever, which could hang the scraper.
    max_retries = 3

    def __init__(self, keyword):
        """Prepare the search payload and the output path for *keyword*.

        No network or filesystem I/O happens here.
        """
        # self.proxy = Proxy().get_proxy()
        self.keyword = keyword
        self.yestoday = self.get_yestoday().replace(':', '-')
        save_dir = self.base_dir + self.yestoday
        # self.folder_exist(save_dir)
        self.save_file = save_dir + '/' + keyword + '.csv'
        self.data = {
            'sortType': '1',             # 1: sort by time
            'pageSize': '10',            # 10 records per page
            'pageNow': '1',              # current page number
            'fullText': keyword,         # search keyword
            'searchType': '0',
            'cateId': '',
            'highlighter': '2',
            'jsfl': '',
            'keyType': 'title',          # search scope; 'title': match titles only
            'timeRange': '0',
            'lowerLimit': '2019-04-01',  # start date (overwritten in request())
            'upperLimit': '2020-05-15',  # end date (overwritten in request())
            'keywordNavigation': '1',
            'likeSearch': '1',
            'reSearch': '2',
            'pySearch': '1',
            'ex': '',
            'url': '',
        }

    def get_yestoday(self):
        """Return the date three days ago as a ``YYYY-MM-DD`` string.

        NOTE: the (misspelled) name is kept for backward compatibility;
        despite the name it goes back three days, not one.
        """
        three_days_ago = datetime.datetime.today() + datetime.timedelta(-3)
        return datetime.datetime.strftime(three_days_ago, "%Y-%m-%d")

    # Request the endpoint and walk every result page.
    def request(self):
        """Fetch all result pages for the configured keyword.

        The search window runs from three days ago until today.  Each page
        is parsed and stored via :meth:`page_parse`.  A page that keeps
        timing out after ``max_retries`` attempts is skipped rather than
        retried forever; a timeout on the initial probe aborts the run
        gracefully instead of raising.
        """
        self.data['lowerLimit'] = self.get_yestoday()
        # self.data['lowerLimit'] = '2020-06-05'
        self.data['upperLimit'] = time.strftime("%Y-%m-%d", time.localtime())
        try:
            response = requests.post(url=self.url, headers=self.headers, data=self.data, timeout=self.timeout)
        except ReadTimeout:
            print('请求超时')
            return
        self.response = response.json()
        max_page = self.get_max_page()
        current_page = 1
        retries = 0
        while current_page <= max_page:
            self.data['pageNow'] = current_page
            try:
                response = requests.post(url=self.url, headers=self.headers, data=self.data, timeout=self.timeout)
                self.response = response.json()
                self.page_parse()
                current_page += 1
                retries = 0
            except ReadTimeout:
                print('请求超时')
                retries += 1
                if retries >= self.max_retries:
                    # Give up on this page and move on to the next one.
                    current_page += 1
                    retries = 0

    def get_max_page(self):
        """Return the number of result pages (10 records per page)."""
        num = self.response['num']  # total record count reported by the server
        return math.ceil(num / 10)

    def page_parse(self):
        """Parse one page of search results and insert each record into the
        gov database via ``sql_gov_model``."""
        list_content = self.response['array']  # all records on this page
        # An empty page carries no records — nothing to store.
        if len(list_content) == 0:
            return None
        for current_content in list_content:
            url = current_content['url']            # link
            title = current_content['titleTerm']    # title
            desc = current_content['summaries']     # summary
            desc = re.sub(r'<[^>]+>', '', desc)     # strip HTML tags from the summary
            str_time = current_content['showTime']  # publication time
            str_time = datetime.datetime.strptime(str_time, '%Y%m%d')
            str_type = current_content['subjectName']  # announcement type

            print(url, title, desc, str_time, str_type)
            gov = sql_gov_model.Gov(website='中华人民共和国工业和信息化部统一检索平台', keyword=self.keyword, title=title, url=url, str_time=str_time, str_type=str_type, desc=desc)
            sql_gov_model.insert_gov(gov)

    def write_file(self, content):
        """Append *content* to the per-keyword CSV file."""
        # Explicit UTF-8 so Chinese text round-trips regardless of the
        # system locale's default encoding.
        with open(self.save_file, 'a', encoding='utf-8') as f:
            f.write(content)

    def folder_exist(self, dir_path):
        '''
        1. Purpose: create the directory at dir_path if it does not exist.
        2. Args: dir_path: directory path to ensure.
        3. Returns: None
        '''
        # exist_ok guards against a race between the check and the mkdir.
        if not os.path.exists(dir_path):
            os.makedirs(dir_path, exist_ok=True)

if __name__ == '__main__':
    # Crawl the MIIT search platform once for every keyword of interest.
    keywords = ('人工智能', '人才', '职称', '项目', '专项', '指南', '申报', '奖励')
    for keyword in keywords:
        GovMiit(keyword).request()

