import requests
import os
import sys
import json
import datetime
import re
import logging
import time
import math
# Make the current working directory importable so the sibling `sql`
# package below resolves when this file is run as a script.
pwd = os.getcwd()
sys.path.append(pwd)
# from proxy import Proxy
from lxml import etree
# gov database models
from sql import sql_gov_model

class GovStcsmsh(object):
    """Scraper for the Shanghai Science and Technology Commission search
    page (stcsm.sh.gov.cn).

    Fetches announcements matching a keyword published between three days
    ago and today, walks every result page, and persists each record
    through ``sql_gov_model.insert_gov``.
    """
    url = 'http://stcsm.sh.gov.cn/search/searchGj.jsp'
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    }
    base_dir = './'
    # Results per page as served by the site; used to derive the page count.
    page_size = 20

    def __init__(self, keyword):
        """Prepare the search parameters and CSV output path for *keyword*.

        NOTE(review): the directory component of ``save_file`` is never
        created here (the ``folder_exist`` call was disabled upstream), so
        ``write_file`` only works if the directory already exists.
        """
        self.keyword = keyword
        self.yestoday = self.get_yestoday().replace(':', '-')
        save_dir = self.base_dir + self.yestoday
        self.save_file = save_dir + '/' + keyword + '.csv'
        self.params = {
            'qQuanbu': keyword,      # search keyword
            'proSymbol': 'CMSstcsm',
            't_id': '244',
            'scope': '',
            'color': 'red',
            'class': '0',
            'sort': 'desc',          # desc: by date, si: by relevance, empty: smart sort
            'dateTag': 'zdsj1',      # zdsj1: explicit time range, all: everything
            'attachType': 'all',
            'qIndex': '2',           # 1: full-text search, 2: title only
            'catId': '',
            'startTime': '2020-05-01',  # placeholder; overwritten in request()
            'endTime': '2020-05-15',    # placeholder; overwritten in request()
            'zdsj': 'zdsj1',
            'p': 1,                  # current page number
        }

    def get_yestoday(self):
        """Return the date three days ago, formatted as ``YYYY-MM-DD``.

        NOTE: despite the name this is three days back, not yesterday; the
        name is kept for backward compatibility with existing callers.
        """
        three_days_ago = datetime.datetime.today() + datetime.timedelta(-3)
        return datetime.datetime.strftime(three_days_ago, "%Y-%m-%d")

    def _fetch(self):
        """GET the search URL with the current ``self.params`` and store the
        HTML text on ``self.response``."""
        self.response = requests.get(
            url=self.url, params=self.params, headers=self.headers, timeout=20
        ).text

    def request(self):
        """Crawl every result page for the configured keyword."""
        self.params['startTime'] = self.get_yestoday()
        self.params['endTime'] = time.strftime("%Y-%m-%d", time.localtime())
        # First fetch only determines the page count; the loop re-requests
        # page 1 with an explicit 'p' parameter, matching prior behavior.
        self._fetch()
        max_page = self.get_max_page()
        print('最大页数', max_page)
        for current_page in range(1, max_page + 1):
            self.params['p'] = current_page
            self._fetch()
            self.page_parse()

    def get_max_page(self):
        """Derive the number of result pages from the total-hit counter in
        the page header.

        Returns 0 when the counter element is missing (layout change or an
        error page) instead of raising IndexError.
        """
        html = etree.HTML(self.response)
        nodes = html.xpath('/html/body/div[@class="search_wrap"]/div[@class="search_title"]/p/span[2]/text()')
        if not nodes:
            return 0
        record = int(nodes[0])  # total number of hits
        return math.ceil(record / self.page_size)

    def page_parse(self):
        """Parse one result page and persist every record found.

        A malformed record (missing link/summary/date) is skipped rather
        than aborting the remainder of the page.
        """
        html = etree.HTML(self.response)
        # All record divs on the page; a single child div means "no results".
        list_content = html.xpath('/html/body/div[@class="search_wrap"]/div[@class="search_inner"]/div[@class="search_left"]/div')
        if len(list_content) <= 1:
            print(self.keyword, '没有结果')
            return None
        for current_content in list_content:
            try:
                url = current_content.xpath('div[2]/a/@href')[0]
                if 'http' not in url:
                    url = 'http://stcsm.sh.gov.cn' + url  # make relative link absolute
                title_info = current_content.xpath('div[2]/a')[0]
                # Normalize title/summary: drop spaces/newlines, use full-width
                # commas so the text is CSV-safe.
                title = title_info.xpath('string(.)').strip().replace(' ', '').replace(',', '，').replace('\n', '')
                desc = current_content.xpath('div[4]/text()')[0].strip().replace(' ', '').replace(',', '，').replace('\n', '')
                str_time = current_content.xpath('div[5]/span/table/tbody/tr/td/span/text()')[0]
            except IndexError:
                # Malformed record (e.g. a non-result block): skip it instead
                # of dropping the rest of the page.
                print('IndexError')
                continue
            match = re.search(r'(\d{4}-\d{2}-\d{2})', str_time)
            if match is None:
                # No recognizable publication date: skip the record.
                continue
            str_time = datetime.datetime.strptime(match.group(1), '%Y-%m-%d')

            print('上海市科学技术委员会', self.keyword, title, url, str_time, '', desc)
            gov = sql_gov_model.Gov(website='上海市科学技术委员会', keyword=self.keyword, title=title, url=url, str_time=str_time, str_type='', desc=desc)
            sql_gov_model.insert_gov(gov)

    def write_file(self, content):
        """Append *content* to the per-keyword CSV file.

        Uses UTF-8 explicitly so Chinese text does not depend on the
        platform's locale encoding.
        """
        with open(self.save_file, 'a', encoding='utf-8') as f:
            f.write(content)

    def folder_exist(self, dir_path):
        """Create *dir_path* (including parents) if it does not exist yet.

        :param dir_path: directory path to ensure
        :return: None
        """
        os.makedirs(dir_path, exist_ok=True)

if __name__ == '__main__':
    # Crawl each configured search keyword in sequence.
    list_keywords = ['人工智能', '人才', '职称', '项目', '专项', '指南', '申报', '奖励']
    for kw in list_keywords:
        GovStcsmsh(kw).request()
    
