import requests
import os
import sys
import json
import datetime
import re
import logging
import time
pwd = os.getcwd()
sys.path.append(pwd)
# from proxy import Proxy
from lxml import etree
# project-local gov database model
from sql import sql_gov_model

class GovShanghai2(object):
    """Scraper for the Shanghai Xuhui government search platform.

    Searches www.xuhui.gov.cn for a keyword over a recent date range,
    walks the paginated result listing, and stores each hit through
    ``sql_gov_model``.
    """
    url = 'http://www.xuhui.gov.cn/search/search'
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    }
    base_dir = './'

    # Pre-compiled (raw-string) pattern for pulling a YYYY-MM-DD date out of
    # a result summary; hoisted so it is not rebuilt for every result row.
    _DATE_RE = re.compile(r'(\d{4}-\d{2}-\d{2})')

    def __init__(self, keyword):
        """Prepare the search parameters and output path for *keyword*.

        keyword: query string submitted to the search endpoint.
        """
        self.keyword = keyword
        # NOTE(review): "%Y-%m-%d" output never contains ':', so this
        # replace() is a no-op kept for compatibility with the original.
        self.yestoday = self.get_yestoday().replace(':', '-')
        save_dir = self.base_dir + self.yestoday
        # self.folder_exist(save_dir)  # NOTE(review): disabled — re-enable
        # before re-enabling write_file(), which assumes the directory exists.
        self.save_file = save_dir + '/' + keyword + '.csv'
        self.params = {
            'page': '1',  # current page number
            'view': '',  # '': all; 'xxgk': gov affairs; 'xwzx': news; 'wshd': interaction; 'bmfw': public services
            'contentScope': '1',  # 1: search titles, 2: search full text
            'dateOrder': '2',  # 1: relevance order, 2: date order
            'tr': '5',  # 1: past week, 2: past month, 3: past year, 4: unlimited, 5: explicit date range
            'dr': '2020-05-11+至+2020-05-15',  # date-range placeholder; overwritten in request()
            'format': '1',
            'uid': '',
            're': '2',
            'all': '1',
            'debug': '1',
            'siteId': 'www.xuhui.gov.cn',
            'siteArea': 'all',  # site scope to search
            'csrf_feedback': '',
            'q': keyword,
        }

    # Name kept as-is (typo and all) because callers use it.
    def get_yestoday(self):
        """Return the date three days ago formatted as YYYY-MM-DD."""
        target = datetime.datetime.today() + datetime.timedelta(days=-3)
        return datetime.datetime.strftime(target, "%Y-%m-%d")

    def request(self):
        """Query the search endpoint and follow pagination until exhausted.

        Builds a date range from three days ago through today, issues the
        initial search, then follows the relative "next page" links returned
        by page_parse() until none remain.
        """
        yestoday = self.get_yestoday()
        today = time.strftime("%Y-%m-%d", time.localtime())
        self.params['dr'] = yestoday + '+至+' + today
        start = True
        next_page = self.url

        while next_page != '':
            current_page = next_page
            if not start:
                # Pagination links are site-relative; prefix the host.
                response = requests.get(url='http://www.xuhui.gov.cn' + current_page, headers=self.headers, timeout=60)
            else:
                response = requests.get(url=current_page, params=self.params, headers=self.headers, timeout=60)
                start = False
            self.response = response.text
            next_page = self.page_parse()

    def page_parse(self):
        """Parse one result page held in ``self.response``.

        Extracts title, URL, publication time, source, and summary for each
        result, stores a Gov record via sql_gov_model, and returns the
        relative URL of the next page ('' when on the last page).
        """
        html = etree.HTML(self.response)
        # All result entries on the current page (class name has a trailing space).
        list_content = html.xpath('//*[@id="results"]/div[@class="result "]')
        for current_content in list_content:
            title = current_content.xpath('a/@title')[0]  # article title
            url = current_content.xpath('div[@class="restcont"]//div[@class="other"]/div[1]/a/@href')[0]  # article link

            # The summary lives in one of two layouts; fall back to the outer
            # container when the inner one is absent, then normalize once.
            try:
                desc_info = current_content.xpath('div[@class="restcont"]/div[@class="content"]/div[@class="entry-news-content"]/div[1]')[0]
            except IndexError:
                desc_info = current_content.xpath('div[@class="restcont"]/div[@class="content"]')[0]
            desc = desc_info.xpath('string(.)').strip().replace(' ', '').replace(',', '，').replace('\n', '')  # article summary

            # Publication time: first YYYY-MM-DD found in the summary,
            # falling back to "now" when none is present.
            match = self._DATE_RE.search(desc)
            if match:
                str_time = datetime.datetime.strptime(match.group(1), '%Y-%m-%d')
            else:
                str_time = datetime.datetime.now()

            str_type_info = current_content.xpath('div[@class="restcont"]//div[@class="other"]/div[2]')[0]
            str_type = str_type_info.xpath('string(.)').strip().replace(' ', '').replace(',', '，').replace('\n', '')  # information source

            print('上海徐汇政务搜索平台', self.keyword, url, str_time,)
            gov = sql_gov_model.Gov(website='上海徐汇政务搜索平台', keyword=self.keyword, title=title, url=url, str_time=str_time, str_type=str_type, desc=desc)
            sql_gov_model.insert_gov(gov)
        # Relative link to the next page, or '' when there is none.
        try:
            next_page = html.xpath('//*[@id="content-inner"]/nav/ul/li/a[@title="下一页"]/@href')[0]
        except IndexError:
            next_page = ''
        return next_page

    def write_file(self, content):
        """Append *content* to the per-keyword CSV file.

        Encoding is pinned to utf-8 so Chinese text is written consistently
        regardless of the platform default.
        """
        with open(self.save_file, 'a', encoding='utf-8') as f:
            f.write(content)

    def folder_exist(self, dir_path):
        """Create *dir_path* (and parents) if it does not already exist.

        dir_path: directory path to ensure. Returns None.
        """
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(dir_path, exist_ok=True)

if __name__ == '__main__':
    # Run one full crawl per search keyword, sequentially.
    keyword_list = ['人工智能', '人才', '职称', '项目', '专项', '指南', '申报', '奖励']
    for current_keyword in keyword_list:
        scraper = GovShanghai2(current_keyword)
        scraper.request()