import requests
import os
import sys
import json
import datetime
import re
import logging
import time
import math
pwd = os.getcwd()
sys.path.append(pwd)
# from proxy import Proxy
from lxml import etree
from urllib.parse import urlencode
# gov database (project-local SQL model)
from sql import sql_gov_model

class GovZjsfq(object):
    """Scraper for search results on www.zjsfq.gov.cn.

    For a given keyword, pages through the site's search endpoint and
    persists each result row (url, title, publish date) through
    ``sql_gov_model``.
    """

    url = 'http://www.zjsfq.gov.cn/cms/1/search.do'
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    }
    base_dir = './'

    def __init__(self, keyword):
        """Store the keyword and build the query payload for the search form.

        :param keyword: search term sent as ``basic_title``.
        """
        # self.proxy = Proxy().get_proxy()
        # self.yestoday = self.get_yestoday().replace(':', '-')
        # save_dir = self.base_dir + self.yestoday
        # self.folder_exist(save_dir)
        # self.save_file = save_dir + '/' + keyword + '.csv'
        self.keyword = keyword
        self.data = {
            'basic_title': keyword,   # search keyword
            'basic_categoryId': '',
            'search_display': '0',
            'size': '20',             # results per page
            'pageNo': '1',            # current page number
        }

    # Return yesterday's date. (Method name typo kept for backward
    # compatibility with any external callers.)
    def get_yestoday(self):
        """Return yesterday's date formatted as ``YYYY-MM-DD``."""
        yesterday = datetime.datetime.today() + datetime.timedelta(-1)
        return datetime.datetime.strftime(yesterday, "%Y-%m-%d")

    # Fetch result pages for the keyword until none remain.
    def request(self):
        """Page through the search results, parsing and storing each page.

        The site exposes no reliable maximum page count (its own "next
        page" control is broken), so paging is capped at 100 and stops
        early as soon as a page contains no records.
        """
        # max_page = self.get_max_page()
        max_page = 100
        current_page = 1
        while current_page <= max_page:
            self.data['pageNo'] = current_page
            # Single fetch per page; the former extra warm-up request for
            # page 1 was redundant (its response was immediately discarded).
            response = requests.get(url=self.url, headers=self.headers,
                                    params=self.data, timeout=60)
            self.response = response.text
            if self.page_parse() == 'end':
                break
            current_page += 1

    # Derive the maximum page number from the page's hidden counter.
    def get_max_page(self):
        """Return the page count implied by the ``pager_pageCount`` field.

        Currently unused by :meth:`request` (see note there); kept for
        reference.
        """
        html = etree.HTML(self.response)
        record = int(html.xpath('//input[@id="pager_pageCount"]/@value')[0])  # total records
        return math.ceil(record / 20)  # 20 records per page

    # Extract the tender/notice entries from the current page.
    def page_parse(self):
        """Parse one result page and insert each entry into the database.

        :return: ``'end'`` when the page holds no result rows (signals the
            caller to stop paging); otherwise ``None``.
        """
        html = etree.HTML(self.response)
        list_content = html.xpath('/html/body/div[@class="search jz"]/ul/li')  # all rows on this page
        # An empty row list means we have paged past the last result.
        if not list_content:
            return 'end'
        for current_content in list_content:
            hrefs = current_content.xpath('a/@href')
            titles = current_content.xpath('a/@title')
            spans = current_content.xpath('span/text()')
            # Skip malformed rows instead of crashing the whole crawl
            # with an IndexError.
            if not (hrefs and titles and spans):
                continue
            url = 'http://www.zjsfq.gov.cn' + hrefs[0]  # absolute link
            title = titles[0]
            str_time = datetime.datetime.strptime(spans[0], '%Y-%m-%d')  # publish date

            print(self.keyword, url, title, str_time)
            gov = sql_gov_model.Gov(website='上海张江国家自主创新示范区', keyword=self.keyword, title=title, url=url, str_time=str_time, str_type='', desc='')
            sql_gov_model.insert_gov(gov)

    # Append text to the CSV output file.
    def write_file(self, content):
        """Append *content* to ``self.save_file``.

        NOTE(review): ``self.save_file`` is only assigned by code that is
        currently commented out in ``__init__``; calling this method as-is
        raises AttributeError — confirm before re-enabling.
        """
        with open(self.save_file, 'a', encoding='utf-8') as f:
            f.write(content)

    def folder_exist(self, dir_path):
        """Create *dir_path* (including parents) if it does not exist.

        :param dir_path: directory path to ensure.
        :return: None
        """
        # exist_ok avoids the check-then-create race of the old
        # os.path.exists() guard.
        os.makedirs(dir_path, exist_ok=True)

if __name__ == '__main__':
    # Crawl each keyword of interest in sequence.
    search_terms = ['人工智能', '人才', '职称', '项目', '专项', '指南', '申报', '奖励']
    for term in search_terms:
        GovZjsfq(term).request()

