#!/usr/bin/env python3 
# -*- coding: utf-8 -*- 
# @Author : Leo
# @File : purchaseSpiderDemo2.py 


"""
2018-08-12：
    取消递归爬取下一页
"""
import json
import random
import time
import pymysql
import requests
import logging
import traceback
from urllib import parse
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup

logging.basicConfig(level=logging.INFO,  # minimum level that gets emitted
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S')

# Homepage of the Tianjin municipal government procurement site
HOMR_URL = 'http://www.tjgp.gov.cn/portal/topicView.do'

# URL the news-list POST requests are sent to
REQUEST_BASE_URL = 'http://www.tjgp.gov.cn/portal/topicView.do'

# Base URL of a news article detail page
NEWS_BASE_URL = 'http://www.tjgp.gov.cn/portal/documentView.do'
# HTTP request headers
HEADERS = {
    'Accept-Language': 'zh-CN,zh;q=0.9',  # this header affects the date format the server renders
    'Host': "www.tjgp.gov.cn",
    'Origin': "http://www.tjgp.gov.cn",
    'Referer': "http://www.tjgp.gov.cn/portal/topicView.do?method=view&view=Infor&id=1665&ver=2&st=1",
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36",
    'X-Requested-With': "XMLHttpRequest",
    'Cache-Control': "no-cache",
    'Postman-Token': "5a9b1f70-e60b-4e6d-b378-8b6e287bfb40"
}

# POST form parameters for the list endpoint
FORM_DATA = {
    'method': 'view',
    'page': '1',
    # 'id': '1665',  # id=1665 is the city-level category, id=1664 is the district-level one
    'step': '1',
    'view': 'Infor',
    'st': '1'
}

# MySQL database configuration
DB_CONFIG = {
    'host': 'localhost',
    'user': 'root',
    'passwd': '123456',
    'port': 3306,
    'db': 'tianjin_gov'
}

# Per-category progress counters: items listed / detail pages fetched / rows saved.
# Reset at the start of each category in start_crawl().
all_news_count = 0
crawl_news_count = 0
save_news_count = 0


def reqUrl(session, req_url, method='GET', form_data=None, timeout=20, *args, **kwargs):
    """
    Generic request helper with throttling and error logging.

    :param session: requests session to issue the call with
    :param req_url: target url
    :param method: 'GET' or 'POST'; any other value returns None
    :param form_data: form payload for POST requests
    :param timeout: per-request timeout in seconds
    :param args: unused, kept for backward compatibility
    :param kwargs: unused, kept for backward compatibility
    :return: the response object, or None on failure / unknown method
    """
    try:
        # Small random delay (0-0.5s) so we do not hammer the server.
        time.sleep(random.random() * 0.5)
        # Lazy %-style args: the message is only formatted if INFO is enabled.
        logging.info('[%s] %s >> %s', method, req_url, form_data)
        if method == 'GET':
            return session.get(url=req_url, timeout=timeout)
        elif method == 'POST':
            return session.post(url=req_url, data=form_data, timeout=timeout)
        # Unknown HTTP method: return None explicitly instead of silently passing.
        return None
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        logging.error('REQUEST ERROR:' + traceback.format_exc())
        return None


def getNewsContent(session, content_url):
    """
    Fetch the detail page of a procurement news item.

    Increments the module-level ``crawl_news_count`` on success; appends the
    url to ``fail_get.log`` when the request fails.

    :param session: request session
    :param content_url: detail page url
    :return: the page <body> as an HTML string (newlines/tabs stripped),
             or '' on request failure or malformed page
    """
    global crawl_news_count
    response = reqUrl(session, req_url=content_url, method='GET')
    if response is not None and response.status_code == 200 and response.text is not None:
        body_tag = BeautifulSoup(response.text, 'html.parser').body
        if body_tag is None:
            # Malformed page without a <body>: the previous code raised
            # AttributeError on `.body.extract()` and aborted the crawl.
            return ''
        content_tag = body_tag.extract()
        content = str(content_tag).strip().replace('\n', '').replace('\r', '').replace('\t', '')
        logging.info('Get news content success: %s' % response.url)
        crawl_news_count += 1
        return content
    # Record the failed url so it can be retried later.
    with open('fail_get.log', 'a', encoding='utf8') as f:
        f.write(content_url + '\n')
    return ''


def getNewsList(session, list_url, form_data):
    """
    Fetch and parse one page of the news list.

    :param session: request session
    :param list_url: list endpoint url
    :param form_data: POST form payload (must contain 'id' and 'page')
    :return: list of item dicts with keys news_url/title/date/area_id/news_id;
             always a list, possibly empty — never None
    """
    response = reqUrl(session, req_url=list_url, method='POST', form_data=form_data)
    if response is None or response.status_code != 200 or response.text is None:
        # Record the failed request parameters for a later retry.
        with open('fail_post.log', 'a', encoding='utf8') as f:
            f.write(json.dumps(form_data, ensure_ascii=False) + '\n')
        return []
    list_bsobj = BeautifulSoup(response.text, 'html.parser')
    data_list_tag = list_bsobj.find('ul', {'class': 'dataList'})
    if data_list_tag is None:
        # Page rendered without the list container. The old code fell through
        # and returned None here, which crashed callers doing len()/iteration.
        return []
    news_tag_list = data_list_tag.find_all('a', {'href': True, 'title': True})
    return [{'news_url': NEWS_BASE_URL + '?method=view&' + each_tag.attrs['href'].split('?')[-1],
             'title': each_tag.attrs['title'].strip(),
             # Publication date sits in the sibling node right after the link.
             'date': each_tag.next_sibling.get_text(),
             'area_id': form_data['id'],
             'news_id': parse.parse_qs(each_tag.attrs['href'].split('?')[-1]).get('id')[0]
             } for each_tag in news_tag_list]


def getMaxPageNum(session, list_url, form_data):
    """
    Read the total page count from the first list page.

    :param session: request session
    :param list_url: list endpoint url
    :param form_data: POST form payload identifying the category
    :return: the maximum page number, or 0 when the request or parse fails
    """
    response = reqUrl(session, req_url=list_url, method='POST', form_data=form_data)
    if response is None or response.status_code != 200 or response.text is None:
        return 0
    page_tag = BeautifulSoup(response.text, 'html.parser').find('span', {'class': 'countPage'})
    # Guard both missing <span> and missing <b>: the old code raised
    # AttributeError when the span existed without a <b> child.
    if page_tag is None or page_tag.b is None:
        return 0
    try:
        return int(page_tag.b.get_text())
    except ValueError:
        # Non-numeric page counter text: treat as "no pages", same as other failures.
        return 0


def createDBTable(conn):
    """
    Create the two destination tables (city_proc / district_proc) if they
    do not already exist.

    :param conn: open pymysql connection
    :return: None
    """
    # Both tables share an identical schema; build the DDL from one template
    # instead of maintaining two duplicated statements.
    ddl_template = '''
        CREATE TABLE IF NOT EXISTS `{table}` (
        `id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT,
        `title` VARCHAR(200) NOT NULL DEFAULT '0',
        `news_id` CHAR(50) NOT NULL DEFAULT '0',
        `create_time` DATE NULL DEFAULT NULL,
        `public_time` DATE NULL DEFAULT NULL,
        `url` VARCHAR(100) NULL DEFAULT NULL,
        `content` TEXT NULL,
        PRIMARY KEY (`id`),
        UNIQUE INDEX `news_id` (`news_id`)
    )
    COLLATE='utf8_general_ci'
    ENGINE=InnoDB
    AUTO_INCREMENT=21
    ;
    '''
    cursor = conn.cursor()
    try:
        for table in ('city_proc', 'district_proc'):
            cursor.execute(ddl_template.format(table=table))
        conn.commit()
    finally:
        # Release the cursor even if a DDL statement fails.
        cursor.close()


def save2DB(conn, items_list):
    """
    Persist crawled items into the table matching their area id.

    Increments the module-level ``save_news_count`` for every row written.

    :param conn: open pymysql connection
    :param items_list: item dicts from getNewsList, each with a 'content' key
    :return: None
    """
    global save_news_count
    # Map the site's topic id to the destination table.
    tables = {'1665': 'city_proc', '1664': 'district_proc'}
    try:
        if not items_list:
            return None
        create_date = time.strftime('%Y-%m-%d')
        cursor = conn.cursor()
        for item in items_list:
            table = tables.get(item['area_id'])
            if table is None:
                # Unknown area id: skip the row, same as before.
                continue
            # Parameterized query instead of string formatting: scraped
            # titles/contents are untrusted input and the old hand-escaped
            # format() calls were vulnerable to SQL injection.
            sql = ('INSERT IGNORE INTO ' + table +
                   '(title, news_id, create_time, public_time, url, content) '
                   'VALUES(%s, %s, %s, %s, %s, %s)')
            cursor.execute(sql, (item['title'], item['news_id'], create_date,
                                 item['date'] if item['date'] else create_date,
                                 item['news_url'], item['content']))
            save_news_count += 1
        conn.commit()
        cursor.close()
    except Exception:
        logging.error('SAVE TO DATABASES ERROR:' + traceback.format_exc())


def start_crawl():
    """
    Entry point: crawl the city-level (1665) and district-level (1664)
    procurement categories page by page and persist each page's items.

    :return: None
    """
    global all_news_count, crawl_news_count, save_news_count
    conn = pymysql.connect(host=DB_CONFIG['host'], port=DB_CONFIG['port'],
                           user=DB_CONFIG['user'], passwd=DB_CONFIG['passwd'],
                           db=DB_CONFIG['db'], charset='utf8mb4')
    try:
        createDBTable(conn)
        session = requests.session()
        session.mount('http://', HTTPAdapter(max_retries=5))  # retry transient failures
        session.mount('https://', HTTPAdapter(max_retries=5))
        session.headers = HEADERS
        # Tuple (not a set, as before) so the crawl order is deterministic:
        # city first, then district.
        for each_id in ('1665', '1664'):
            # Counters are per category.
            all_news_count, crawl_news_count, save_news_count = 0, 0, 0
            form_data = FORM_DATA.copy()
            form_data['id'] = each_id
            max_page_num = getMaxPageNum(session, REQUEST_BASE_URL, form_data)
            if max_page_num <= 0:
                # Explicit check instead of `assert` (asserts vanish under -O);
                # skip this category rather than aborting the whole run.
                logging.error('Could not determine page count for category %s, skipping.', each_id)
                continue
            for each_page in range(1, max_page_num + 1):
                logging.info('Current category: %s, crawl progress: %s / %s' % (each_id, each_page, max_page_num))
                form_data['page'] = str(each_page)
                news_items_list = getNewsList(session, REQUEST_BASE_URL, form_data)
                all_news_count += len(news_items_list)
                result_items_list = []
                for each_news_item in news_items_list:
                    each_news_item['content'] = getNewsContent(session, each_news_item['news_url'])
                    result_items_list.append(each_news_item)
                save2DB(conn, result_items_list)
                logging.info('All: %s, Crawl: %s, Save: %s.' % (all_news_count, crawl_news_count, save_news_count))
        logging.info('Crawl news content complete.')
    finally:
        # Always release the DB connection (previously leaked on any exception).
        conn.close()


if __name__ == '__main__':
    start_crawl()
