# -*- coding:UTF-8 -*-

import requests
import pymysql
import random
import json
import time
import os
import re
from requests import RequestException


# Load the saved login cookie so requests stay authenticated.
def get_cookie(path='cookie.txt'):
    """Load the persisted login cookie dict from *path*.

    *path* defaults to the original hard-coded 'cookie.txt', so existing
    callers are unaffected. Returns the cookie dict, or None when the
    file is missing/unreadable or does not contain valid JSON.
    """
    try:
        # `with` closes the file automatically; the original's explicit
        # f.close() inside the with-block was redundant.
        with open(path, 'r') as f:
            return json.loads(f.read())
    except (IOError, ValueError) as err:
        # IOError: file missing/unreadable; ValueError: malformed JSON
        # (the original crashed on bad JSON instead of returning None).
        print(err)
        return None


# Fetch the MP login landing page to obtain the session URL.
def get_login_url(cookie):
    """GET https://mp.weixin.qq.com/ with the saved cookie.

    Returns the final (post-redirect) URL — it carries the session
    `token` query parameter parsed later by main() — or None on failure.
    """
    try:
        session = requests.session()
        # Must be set BEFORE the request; the original assigned it after
        # session.get(), where it had no effect on that request.
        session.keep_alive = False
        response = session.get('https://mp.weixin.qq.com/',
                               headers={'Connection': 'close'},
                               cookies=cookie,
                               timeout=10)
        if response.status_code == 200:
            return response.url
        return None
    except RequestException as err:
        print(err)
        return None


# Search for an official account via the searchbiz endpoint.
def get_search_page(cookie, token, query='meirentan', begin=0, count=5):
    """Search official accounts whose name matches *query*.

    query/begin/count are generalized with defaults equal to the
    original hard-coded values, so existing callers are unaffected.
    Returns the JSON response text, or None on failure.
    """
    searchbiz_url = 'https://mp.weixin.qq.com/cgi-bin/searchbiz?'
    search_dict = {
        'action': 'search_biz',
        'token': token,
        'lang': 'zh_CN',
        'f': 'json',
        'ajax': 1,
        # random cache-buster expected by the endpoint
        'random': random.random(),
        'query': query,
        'begin': begin,
        'count': count
    }
    try:
        search_response = requests.get(searchbiz_url, cookies=cookie,
                                       params=search_dict, timeout=10)
        if search_response.status_code == 200:
            return search_response.text
        return None
    except RequestException as err:
        print(err)
        return None


# Build the per-page `begin` parameter for the index-page requests.
def get_page_num(token, fakeid, begin, end):
    """Yield one appmsg request-parameter dict per page in [begin, end).

    Page i maps to offset (i - 1) * 5, five articles per page. `random`
    is a fresh cache-buster for every yielded dict.
    """
    # Fields that do not change between pages, built once.
    base_params = {
        'token': token,
        'lang': 'zh_CN',
        'f': 'json',
        'ajax': 1,
        'action': 'list_ex',
        'count': 5,
        'query': '',
        'fakeid': fakeid,
        'type': 9
    }
    for page in range(begin, end):
        params = dict(base_params)
        params['random'] = random.random()
        params['begin'] = (page - 1) * 5
        yield params


# Fetch one index (article-list) page of the searched account.
def get_index_page(cookie, appmsg_dict):
    """GET the appmsg endpoint with a parameter dict from get_page_num().

    Returns the JSON response text, or None on failure.
    """
    # NOTE(review): the Referer embeds a stale hard-coded token
    # (token=1754696920); the endpoint appears to accept it, but it
    # should probably be built from the live token — confirm.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) Apple'
               + 'WebKit/537.36 (KHTML,like Gecko) Chrome/68.0.3440.106 Safari/537.36',
               'Referer': 'https://mp.weixin.qq.com/cgi-bin/appmsg?t=media/appmsg_edit&'
               + 'action=edit&type=10&isMul=1&isNew=1&lang=zh_CN&token=1754696920',
               'Host': 'mp.weixin.qq.com'
               }
    appmsg_url = 'https://mp.weixin.qq.com/cgi-bin/appmsg?'
    try:
        appmsg_response = requests.get(appmsg_url, headers=headers,
                                       cookies=cookie, params=appmsg_dict,
                                       timeout=10)
        if appmsg_response.status_code == 200:
            return appmsg_response.text
        return None
    except RequestException as err:
        print(err)
        return None


# Parse an index page, yielding each article's title and link.
def parse_index_page(index_page):
    """Yield {'title', 'link'} dicts for every article on an index page.

    *index_page* is the JSON text returned by get_index_page(). Yields
    nothing when the payload carries no 'app_msg_list' (e.g. an error
    response) instead of crashing with TypeError as the original did.
    """
    # The `encoding` kwarg of json.loads defaulted to utf-8 and was
    # removed in Python 3.9; dropping it preserves behavior.
    detail_list = json.loads(index_page).get('app_msg_list') or []
    for item in detail_list:
        yield {
            'title': item.get('title'),
            'link': item.get('link')
        }


# Download an article's detail page.
def get_detail_page(link):
    """GET *link* and return its HTML text, or None on failure."""
    try:
        # The original created a throwaway Session for a single GET;
        # a plain requests.get is equivalent here.
        response = requests.get(link, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException as err:
        print(err)
        return None


# Extract the article body and count its Chinese characters.
def parse_detail_page(content):
    """Extract the article body's CJK text from detail-page HTML.

    Yields a single dict:
        c_len           -- total number of CJK characters found
        article_content -- the CJK runs, each followed by one space
    Raises ValueError (a subclass of the original bare Exception, so
    existing handlers still match) when the article container is absent.
    """
    patt = re.compile(r'class="rich_media_content " id="js_content">'
                      r'([\s\S]*?)</div>[\s\S]*?var first_sceen__time', re.S)
    tags = patt.findall(content)
    if not tags:
        raise ValueError('article body container not found')
    # u'' literal replaces the Python-2-only ur'' form; same pattern.
    runs = re.findall(u'[\u4e00-\u9fa5]+', tags[0])
    c_len = sum(len(run) for run in runs)
    # Bug fix: the original did `char += char + ' '` before appending,
    # which duplicated every CJK run in the output text.
    article_content = ''.join(run + ' ' for run in runs)
    yield {
        'c_len': c_len,
        'article_content': article_content
    }


def main(begin, end):
    """Crawl index pages [begin, end) of the target account and insert
    each qualifying article (title, word counts, body, url) into t1.

    Relies on the module-level `db`/`cursor` created under __main__.
    """
    cookie = get_cookie()
    login_url = get_login_url(cookie)
    # The session token rides on the redirected login URL.
    token = re.findall('token=(.*)', login_url)[0]
    search_page = get_search_page(cookie, token)
    # fakeid identifies the official account in subsequent requests.
    fakeid = json.loads(search_page).get('list')[0].get('fakeid')
    for appmsg_dict in get_page_num(token, fakeid, begin, end):
        index_page = get_index_page(cookie, appmsg_dict)
        # Pause between requests to dodge anti-crawler throttling.
        time.sleep(4)
        for entry in parse_index_page(index_page):
            url = entry.get('link')
            title = entry.get('title')
            # Skip announcement/roster/picture-only posts by title.
            if u'公布' in unicode(title) or u'名单' in unicode(title) or unicode(title) == u'分享图片':
                continue
            try:
                print('%s%s' % ('title: ', unicode(title).encode('gbk')))
            except UnicodeEncodeError:
                print('-----display error!-----')
            # Title length = CJK characters plus word-character runs.
            chars1 = re.findall(u'[\u4e00-\u9fa5]', unicode(title))
            chars2 = re.findall(r'\w+', title)
            t_len = len(chars1) + len(chars2)
            print('%s%s' % ('Title Word Count: ', t_len))
            time.sleep(3)
            detail_html = get_detail_page(url)
            # Bug fix: the original left c_len/body unbound when the
            # download failed (NameError on the first article, stale
            # values from the previous article afterwards) and passed
            # None into the NOT NULL `content` column.
            c_len = 0
            article = ''
            if detail_html:
                for parsed in parse_detail_page(detail_html):
                    c_len = parsed['c_len']
                    article = parsed['article_content']
                    print('%s%s' % ('Article Word Count: ', c_len))
            else:
                print('No content')
            print('%s%s' % ('saving to table t1...', os.linesep))
            cursor.execute('insert into t1(title, t_len, c_len, content, url) values (%s,%s,%s,%s,%s)',
                           (title, t_len, c_len, article, url))
            db.commit()


if __name__ == '__main__':
    # NOTE(review): database credentials are hard-coded in source;
    # they should come from a config file or environment variable.
    db = pymysql.connect(
        host='127.0.0.1',
        port=3306,
        user='root',
        passwd='lsq910561556Wy',
        db='weixin',
        charset='utf8'
    )
    cursor = db.cursor()
    try:
        # IF NOT EXISTS lets the script be re-run; the original
        # aborted with "table already exists" on every second run.
        sql_create = \
            'create table if not exists t1(id int auto_increment primary key,title varchar(200) not null,t_len ' \
            'int not null,c_len int not null,content varchar(7000) not null,url varchar(255) not null);'
        cursor.execute(sql_create)
        print('%s%s' % ('已创建表t1，开始录入数据……'.decode('utf-8').encode('gbk'), os.linesep))
        main(5, 35)
        print('%s%s' % (os.linesep, '数据录入完毕！'.decode('utf-8').encode('gbk')))
    finally:
        # Release DB resources even when the crawl fails part-way;
        # the original leaked the cursor/connection on any exception.
        cursor.close()
        db.close()
