import requests
import os
import sys
import json
import datetime
import re
import logging
import time
import math
# Make the current working directory importable so the local `sql` package
# (imported below) resolves when the script is run from its own folder.
pwd = os.getcwd()
sys.path.append(pwd)
# from proxy import Proxy
from lxml import etree
from urllib.parse import urlencode
# gov database (project-local module)
from sql import sql_gov_model

class GovMost(object):
    """Scraper for announcement search results on service1.most.gov.cn.

    For a given keyword it POSTs the site's search form, walks every
    result page, and persists each record through ``sql_gov_model``.
    """

    url = 'https://service1.most.gov.cn/sousuo/s.html'
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    }
    base_dir = './'

    def __init__(self, keyword):
        """Prepare the search form payload and output path for *keyword*."""
        # self.proxy = Proxy().get_proxy()
        self.keyword = keyword
        # get_yestoday() returns "%Y-%m-%d", which never contains ':' —
        # the replace() is kept for compatibility but is a no-op here.
        self.yestoday = self.get_yestoday().replace(':', '-')
        save_dir = self.base_dir + self.yestoday
        # self.folder_exist(save_dir)
        self.save_file = save_dir + '/' + keyword + '.csv'
        self.data = {
            'year': '2020',                    # year to search
            'channel': '',                     # section/channel; empty = all
            'publishDate': '2020-05-01',       # range start (overwritten in request())
            'publishDateUpper': '2020-05-18',  # range end (overwritten in request())
            'title': keyword,                  # keyword matched against titles
            'content': '',                     # keyword matched against body text
            'p': 1,                            # current page number
            'pager.pageNumber': 1,             # current page number (pager widget)
            'pager.pageSize': 50,              # records per page requested
        }

    # Despite the name, this returns the date three days ago (kept for callers).
    def get_yestoday(self):
        """Return the date three days ago, formatted as YYYY-MM-DD."""
        target = datetime.datetime.today() + datetime.timedelta(days=-3)
        return datetime.datetime.strftime(target, "%Y-%m-%d")

    def request(self):
        """Fetch every result page for the current keyword and parse each one."""
        # Search window: three days ago through today.
        self.data['publishDate'] = self.get_yestoday()
        self.data['publishDateUpper'] = time.strftime("%Y-%m-%d", time.localtime())
        # Refresh the keyword in case self.keyword was changed after __init__.
        self.data['title'] = self.keyword
        # First request is only used to determine how many pages exist.
        response = requests.post(url=self.url, headers=self.headers, data=self.data, timeout=60)
        self.response = response.text
        max_page = self.get_max_page()
        current_page = 1
        while current_page <= max_page:
            self.data['p'] = current_page
            self.data['pager.pageNumber'] = current_page
            response = requests.post(url=self.url, headers=self.headers, data=self.data, timeout=60)
            self.response = response.text
            self.page_parse()
            current_page += 1

    def get_max_page(self):
        """Return the number of result pages, or 0 when the search had no hits."""
        html = etree.HTML(self.response)
        try:
            # Total matching records, reported by the hidden pager input.
            record = int(html.xpath('//input[@id="pager_pageCount"]/@value')[0])
        except IndexError:
            # The pager input is absent when the search produced no results.
            print('没有搜索到内容：', self.url, self.data)
            return 0
        # NOTE(review): the form asks for pager.pageSize = 50, yet the page
        # count is derived with 20 records per page — confirm which size the
        # server actually honours. Kept at 20 to preserve existing behaviour;
        # request() tolerates trailing empty pages via page_parse().
        return math.ceil(record / 20)

    def page_parse(self):
        """Parse one result page and persist every record via sql_gov_model."""
        html = etree.HTML(self.response)
        # All <dl> entries on the current result page.
        list_content = html.xpath('/html/body/div/div[@class="floor"]/div[@class="right_list mB15"]/div[@class="seach_list"]/dl')
        # An empty list means the current page holds no records.
        if not list_content:
            return None
        for current_content in list_content:

            url = current_content.xpath('dt/a/@href')[0]  # link

            title_info = current_content.xpath('dt/a')[0]
            # Strip whitespace and swap ASCII commas for full-width ones so the
            # value stays CSV-safe.
            title = title_info.xpath('string(.)').strip().replace(' ', '').replace(',', '，').replace('\n', '')  # title

            desc = current_content.xpath('dd[1]/p/text()')[0].strip().replace(' ', '').replace(',', '，').replace('\n', '')  # summary

            str_time = current_content.xpath('dd[2]/span/span[1]/code/text()')[0]
            str_time = datetime.datetime.strptime(str_time, '%Y-%m-%d')  # publish date

            str_type = current_content.xpath('dd[2]/span/span[2]/text()')[0]  # announcement type

            print(self.keyword, url, title, desc, str_time, str_type)
            gov = sql_gov_model.Gov(website='国家科技管理信息系统公共服务平台', keyword=self.keyword, title=title, url=url, str_time=str_time, str_type=str_type, desc=desc)
            sql_gov_model.insert_gov(gov)

    def write_file(self, content):
        """Append *content* to this keyword's CSV file."""
        # Explicit encoding: titles and summaries contain Chinese text, and the
        # platform default encoding is not guaranteed to handle it.
        with open(self.save_file, 'a', encoding='utf-8') as f:
            f.write(content)

    def folder_exist(self, dir_path):
        """Create *dir_path* (including parents) if it does not already exist.

        :param dir_path: directory path to ensure
        :return: None
        """
        # exist_ok avoids the check-then-create race of the old
        # os.path.exists() guard.
        os.makedirs(dir_path, exist_ok=True)

if __name__ == '__main__':
    # Run one independent crawl per search keyword.
    keyword_list = ['人工智能', '人才', '职称', '项目', '专项', '指南', '申报', '奖励']
    for keyword in keyword_list:
        GovMost(keyword).request()
    
