# 根据需求，需要对学院网站中校园新闻数据进行采集，目标网址为：http://www.ahdy.edu.cn/18/list.htm
# 1.编写代码完成以下数据的采集工作（新闻标题，发布者，发布时间，浏览次数，新闻内容）
# 2. 运行数据采集程序，完成前200条新闻数据的采集工作，将采集到数据保存成ahdyxw.csv
# 3. 编写程序读取ahdyxw.csv中的新闻标题，发布时间和浏览次数，
# 并筛选出2019年发布且浏览次数排在前10的新闻数据，使用matplotlib程序库中的
# bar函数实现上述前10数据的可视化，最后将上述前10数据写入到mysql中的t1数据库的xw表中，
# 表的结构自定义（只要满足上述数据项的存储即可）
# 访问本地mysql环境的相关参数：注意——本文件中并未给出这些参数，需在负责写入MySQL的脚本中自行配置（host/user/password/数据库t1/表xw）
import pymysql
import json
import re
import requests
import time
import datetime
import os


class Ahdy(object):
    """Scraper for the campus news list at http://www.ahdy.edu.cn/18/list.htm.

    For every article on the paginated list it collects title, publisher,
    publish date, view count and body text, and appends the rows to a
    date-stamped CSV file in the current directory.
    """

    def __init__(self):
        # Headers for plain GET requests (list pages and article pages).
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
        }

        # Headers for the POST that queries the WP_VisitCount view counter.
        self.headers_post = {
            'Host': 'www.ahdy.edu.cn',
            'Connection': 'keep-alive',
            'Content-Length': '0',
            'Accept': 'text/plain, */*; q=0.01',
            'Origin': 'http://www.ahdy.edu.cn',
            'X-Requested-With': 'XMLHttpRequest',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36',
            'Referer': 'http://www.ahdy.edu.cn/2019/0223/c18a23868/page.htm',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,la;q=0.7',
            'Cookie': 'zg_did=%7B%22did%22%3A%20%2216904f74c0d5cf-0ead283929933b-3e740b5c-1fa400-16904f74c0eac4%22%7D; zg_=%7B%22sid%22%3A%201551061574007%2C%22updated%22%3A%201551061574011%2C%22info%22%3A%201550566509590%2C%22superProperty%22%3A%20%22%7B%7D%22%2C%22platform%22%3A%20%22%7B%7D%22%2C%22utm%22%3A%20%22%7B%7D%22%2C%22referrerDomain%22%3A%20%22ehall.ahdy.edu.cn%22%2C%22cuid%22%3A%20%22170504412%22%2C%22zs%22%3A%200%2C%22sc%22%3A%200%7D'
        }
        file_dir = './'
        file_name = 'ahdy-%s-data.csv' % str(datetime.datetime.now().date())
        file_path = os.path.join(file_dir, file_name)
        # BUG FIX: the file is opened in append mode, so re-running the
        # script used to append a duplicate header row every time.  Only
        # write the header when the file is new or still empty.
        write_header = not os.path.exists(file_path) or os.path.getsize(file_path) == 0
        self.fp = open(file_path, 'a', encoding='utf-8')
        if write_header:
            self.fp.write('新闻标题,发布者,发布时间,浏览次数,新闻内容\n')

        # Regular expressions, by index:
        #   0: list page   -> (article url, title, publish date)
        #   1: article page -> publisher
        #   2: article page -> url of the WP_VisitCount counter endpoint
        #   3: article page -> the article-content <div>
        #   4: tag stripper -> text fragments between '>' and '<'
        self.regx = [
            r"<span.class=\"news_title\"><a.href='(.*?)'.*?>(.*?)</a></span>[\s\S]*?<span.class=\"news_meta\">(.*?)</span>",
            r"<span class=\"arti_publisher\">(.*?)</span>",
            r'<span class="WP_VisitCount"[\s\S]*?url="(.*?)">',
            r'<div class=\'wp_articlecontent\'>[\s\S]*?</div>',
            r'>([\s\S]*?)<'
        ]
        self.sub_regx = [r'\r|\n']
        print('init ok')

    def response_handler_get(self, url, data):
        """GET *url* with *data* as query params; return a UTF-8 decoded response."""
        # timeout keeps a single hung connection from stalling the whole crawl
        response = requests.get(url, data, headers=self.headers, timeout=10)
        response.encoding = 'utf-8'
        return response

    def response_handler_post(self, url, data):
        """POST *data* to *url* (view-counter endpoint); return a UTF-8 response."""
        response = requests.post(url, data, headers=self.headers_post, timeout=10)
        response.encoding = 'utf-8'
        return response

    def parse(self, response):
        """Parse one list page, fetch each linked article, and return row dicts.

        Each dict has keys 新闻标题/发布者/发布时间/浏览次数/新闻内容 with ASCII
        commas replaced by fullwidth ones so the hand-rolled CSV stays valid.
        """
        text = response.text
        flag_arr = []
        url_y = 'http://www.ahdy.edu.cn'
        for i in re.findall(self.regx[0], text):
            flag_dict = {'新闻标题': i[1], '发布时间': i[2]}
            page = self.response_handler_get(url_y + i[0], {}).text
            flag_dict['发布者'] = ''.join(re.findall(self.regx[1], page))
            liulan = ''.join(re.findall(self.regx[2], page))
            # BUG FIX: only hit the counter endpoint when its URL was found;
            # previously an empty match POSTed to the site root.  re.sub
            # already returns a str, so the old ''.join() wrapper is gone.
            if liulan:
                flag_dict['浏览次数'] = re.sub(
                    self.sub_regx[0], '', self.response_handler_post(url_y + liulan, {}).text)
            else:
                flag_dict['浏览次数'] = ''
            # BUG FIX: findall(...)[0] raised IndexError (killing the crawl)
            # for articles without a recognizable content div.
            content_div = re.findall(self.regx[3], page)
            if content_div:
                flag_dict['新闻内容'] = ''.join(re.findall(self.regx[4], content_div[0]))
            else:
                flag_dict['新闻内容'] = ''
            flag_dict = {k: str(v).replace(',', '，') for k, v in flag_dict.items()}
            flag_arr.append(flag_dict)
        return flag_arr

    def save_data(self, tiems):
        """Append one CSV row per item dict to the open output file.

        NOTE: the parameter name keeps the original 'tiems' typo so existing
        keyword callers are not broken.
        """
        for i in tiems:
            self.fp.write('{},{},{},{},{}\n'.format(i['新闻标题'],
                                                    i['发布者'],
                                                    i['发布时间'],
                                                    i['浏览次数'],
                                                    i['新闻内容']))

    def main(self):
        """Crawl list pages 1-14 and persist every scraped row to the CSV."""
        url = 'http://www.ahdy.edu.cn/18/list{}.htm'

        try:
            for i in range(1, 15):
                print('start {}'.format(url.format(i)))
                response = self.response_handler_get(url.format(i), {})
                items = self.parse(response)
                self.save_data(items)
        finally:
            # Close even when a page fails mid-crawl so rows already
            # written are flushed to disk (the original leaked the handle).
            self.fp.close()
        print('ok')


if __name__ == '__main__':
    # Script entry point: build the scraper and run the full crawl.
    Ahdy().main()
