# -*- coding:UTF-8 -*-

import requests
import random
import json
import time
import os
import re
import oss2
from bs4 import BeautifulSoup as bs
import redis
import pymysql
import config
import datetime
import sys
from requests import RequestException


# 获取登录页cookie保持登录状态
# Load the saved login cookies so requests can reuse the MP session.
def get_cookie():
    """Read cookie.txt and return the cookie dict, or None on failure.

    Prints the error and returns None when the file is missing or does
    not contain valid JSON (best-effort contract kept from the original).
    """
    try:
        # json.load reads straight from the handle; the `with` block closes
        # it — the original's explicit f.close() inside `with` was redundant.
        with open('cookie.txt', 'r') as f:
            return json.load(f)
    except (OSError, ValueError) as err:  # ValueError covers JSONDecodeError
        print(err)
        return None


# 获取登录会话页url
# Open the MP home page with the saved cookies; a logged-in session is
# redirected to a URL that carries the token query parameter.
def get_login_url(cookie):
    """Return the post-login URL (contains token=...), or None on failure."""
    try:
        sess = requests.session()
        sess.keep_alive = False
        resp = sess.get('https://mp.weixin.qq.com/',
                        headers={'Connection': 'close'},
                        cookies=cookie)
    except Exception as err:
        print(err)
        return None
    return resp.url if resp.status_code == 200 else None


# 利用cookie与token参数获取搜索页内容
# Query the account-search endpoint using the cookie + token pair.
def get_search_page(cookie, token, account):
    """Search for *account* and return the raw JSON text, or None on failure."""
    params = dict(
        action='search_biz',
        token=token,
        lang='zh_CN',
        f='json',
        ajax=1,
        random=random.random(),
        query=account,
        begin=0,
        count=5,
    )
    try:
        resp = requests.get('https://mp.weixin.qq.com/cgi-bin/searchbiz?',
                            cookies=cookie, params=params)
    except Exception as err:
        print(err)
        return None
    return resp.text if resp.status_code == 200 else None


# 构造参数begin
# Generator yielding the request-parameter dict for each page in [begin, end).
def get_page_num(token, fakeid, m_id, begin, end):
    """Yield appmsg params per page, recording progress in Redis.

    The current page number is written to merchant:begin:<m_id> so an
    interrupted crawl can resume from where it stopped.
    """
    # Connect once and build the key once — the original opened a fresh
    # Redis connection on every loop iteration.
    r = redis_conn()
    merchant_begin_key = 'merchant:begin:{}'.format(m_id)
    for i in range(begin, end):
        print(f"爬取第{i}页数据")
        r.set(merchant_begin_key, i)
        yield appmsg_dist(token, fakeid, i)


# 获取搜索公众号后的索引页内容
# Fetch one page of the article index (appmsg list) for a searched account.
def get_index_page(cookie, appmsg_dict):
    """Return the index-page JSON text for the given params, or None."""
    request_headers = {
        'User-Agent': ('Mozilla/5.0 (Windows NT 6.1; Win64; x64) Apple'
                       'WebKit/537.36 (KHTML,like Gecko) Chrome/68.0.3440.106 Safari/537.36'),
        'Referer': ('https://mp.weixin.qq.com/cgi-bin/appmsg?t=media/appmsg_edit&'
                    'action=edit&type=10&isMul=1&isNew=1&lang=zh_CN&token=1754696920'),
        'Host': 'mp.weixin.qq.com',
    }
    try:
        resp = requests.get('https://mp.weixin.qq.com/cgi-bin/appmsg?',
                            headers=request_headers,
                            cookies=cookie,
                            params=appmsg_dict)
    except Exception as err:
        print(err)
        return None
    if resp.status_code != 200:
        return None
    return resp.text


# 解析索引页内容，获取每篇文章的标题与链接
# Parse the index page and yield title/link metadata for every article.
def parse_index_page(index_page):
    """Yield one dict per article from an index-page JSON payload.

    Each dict carries title/link/digest/cover, the publish time formatted
    as 'YYYY-mm-dd HH:MM:SS' (local time), the media duration, and the
    total article count (app_msg_cnt) reported by the endpoint.
    """
    # Parse the payload once — the original called json.loads twice.
    payload = json.loads(index_page)
    detail_list = payload.get('app_msg_list')
    app_msg_cnt = payload.get('app_msg_cnt')
    for item in detail_list:
        create_time = time.strftime(
            "%Y-%m-%d %H:%M:%S", time.localtime(item.get('create_time')))
        yield {
            'title': item.get('title'),
            'link': item.get('link'),
            'digest': item.get('digest'),
            'cover': item.get('cover'),
            'create_time': create_time,
            'media_duration': item.get('media_duration'),
            'app_msg_cnt': app_msg_cnt,
        }


# 获取文章的详情页内容
# Download the full article page for one link.
def get_detail_page(link):
    """Return the article HTML at *link*, or None on error / non-200."""
    try:
        resp = requests.session().get(link)
    except Exception as err:
        print(err)
        return None
    return resp.text if resp.status_code == 200 else None


# 解析出文章内容并计算文章字数
# Extract the article body text and count its Chinese characters.
def parse_detail_page(content):
    """Yield a single dict with the article's CJK text and its length.

    'c_len' is the number of CJK characters; 'article_content' is the runs
    of CJK text joined by single spaces (with a trailing space). Raises
    Exception (on first iteration) when the body div is not found.
    """
    patt = re.compile('class="rich_media_content " id="js_content" style="visibility: hidden;">([\s\S]*?)</div>[\s\S]*?var first_sceen__time', re.S)
    tags = re.findall(patt, content)
    if not tags:
        raise Exception('article body not found')
    # Every run of consecutive CJK characters inside the body markup.
    chars = re.findall(r'[\u4e00-\u9fa5]+', tags[0])
    c_len = sum(len(run) for run in chars)
    # BUGFIX: the original did `char += char + ' '`, which duplicated every
    # run ("你好" became "你好你好 ") before appending; join each run once.
    article_content = ''.join(run + ' ' for run in chars)
    yield {
        'c_len': c_len,
        'article_content': article_content
    }

def appmsg_dist(token, fakeid, i=1):
    """Build the query parameters for page *i* (1-based) of the article list."""
    return {
        'token': token,
        'lang': 'zh_CN',
        'f': 'json',
        'ajax': 1,
        'random': random.random(),
        'action': 'list_ex',
        'begin': (i - 1) * 5,  # 5 articles per page
        'count': 5,
        'query': '',
        'fakeid': fakeid,
        'type': 9,
    }

#获取总页数
# Determine the exclusive upper page bound for get_page_num.
def get_end_count(cookie, item, end=None):
    """Return *end* if given, else derive it from the account's article count.

    The endpoint reports app_msg_cnt articles at 5 per page, so the page
    count is ceil(cnt / 5); +1 because get_page_num's range() bound is
    exclusive and pages are numbered from 1. Returns None when the count
    cannot be determined.
    """
    if end:
        return end
    index_page = get_index_page(cookie, item)
    app_msg_cnt = None
    for entry in parse_index_page(index_page):
        app_msg_cnt = entry['app_msg_cnt']  # same value on every item
    if app_msg_cnt:
        # BUGFIX: the original used cnt // 5 + 1, which skipped the final
        # partial page whenever cnt was not a multiple of 5 (e.g. cnt=7
        # yielded end=2, so only page 1 was ever crawled).
        app_msg_cnt = -(-app_msg_cnt // 5) + 1
    return app_msg_cnt

#解析正文
# Extract the article body HTML from a published article URL.
def parse_content(url, account):
    """Download *url*, cut out the js_content div, and return it with all
    mmbiz-hosted images re-hosted on OSS (via replace_img)."""
    print(url)
    response = requests.get(url)
    html = str(response.text)
    findDiv = re.compile('''<div class="rich_media_content(.*?)id="js_content" style="visibility: hidden;">(.*?)</div>''', re.S)
    div = re.findall(findDiv, html)
    if len(div):
        # Two capture groups -> each match is a tuple; group 2 is the body.
        div = div[0][1]
    else:
        # Fallback for the layout without the visibility style.
        findDiv = re.compile('''<div id="js_content">(.*?)</div>''', re.S)
        div = re.findall(findDiv, html)
        # BUGFIX: with a single capture group re.findall returns plain
        # strings, so the original `div[0][1]` kept only the SECOND
        # CHARACTER of the match instead of the whole body.
        div = div[0]

    # Strip literal "\n" sequences embedded in the markup.
    div = div.replace(r'\n', '')
    div = replace_img(div, account)

    return div
#下载多媒体
def download_media(html, account, title):
    """Find an embedded video in *html*, cache it locally and upload to OSS.

    Returns the OSS URL of the uploaded .mp4, or None (implicit
    fall-through) when no usable video URL is present in the page.
    """
    soup = bs(html, "lxml")  # NOTE(review): soup is built but never used — confirm intent
    # video
    if not os.path.exists(account):
        os.mkdir(account)
    path = os.path.join(account, title)
    file = "../spider_files/{}.mp4".format(path)
    # Already downloaded on a previous run: just (re-)upload and return.
    if os.path.isfile(file):
        # url = upload_file("{}.mp4".format(path))
        url = upload_file(file)
        return url
    # Video sources appear in the page's inline JS as:  url: '...',
    video_url = re.findall(r"url: \'(.+)\',\n", html)
    if video_url:
        # Drop the player bootstrap URL; keep the real media source(s).
        video_url = [url for url in video_url if "videoplayer" not in url]
        if video_url:
            # WeChat escapes '&' as the literal text \x26 inside the script.
            video_url = video_url[0].replace(r"\x26", "&")
            doc = requests.get(video_url)
            with open(file, "wb") as f:
                f.write(doc.content)
            url = upload_file(file)
            return url
#下载图片
# Download an image (with a local cache under ../spider_files/<account>/imgs).
def download_img(url, account):
    """
    Parameters
    ----------
    url: str
        Image link (mmbiz-style URL; format token sits in path segment 3,
        image key in the second-to-last segment).
    account: str
        Account name used as the cache directory name.

    Returns
    ----------
    tuple(str, bytes): local path of the image and its raw bytes.
    """
    account_path = '../spider_files/' + account
    # ROBUSTNESS: makedirs(exist_ok=True) also creates ../spider_files when
    # missing; the original os.mkdir failed unless the parent existed.
    os.makedirs(account_path, exist_ok=True)
    # Image cache directory
    img_path = os.path.join(account_path, 'imgs')
    print(url)
    # Derive a file name from the URL: <key>.<format>
    name = "{}.{}".format(url.split("/")[-2], url.split("/")[3].split("_")[-1])
    # Normalize unknown extensions to jpg
    if name.split('.')[1] not in ['jpg', 'png', 'jpeg', 'bmp']:
        name = name.replace(name.split('.')[1], 'jpg')
    # Full local path of the cached file
    imgpath = os.path.join(img_path, name)
    os.makedirs(img_path, exist_ok=True)
    # Serve from cache when the image was already downloaded.
    if os.path.isfile(imgpath):
        with open(imgpath, "rb") as f:
            img = f.read()
        return imgpath, img

    response = requests.get(url)
    img = response.content
    with open(imgpath, "wb") as f:
        f.write(img)
    return imgpath, img

#上传图片到OSS
# Upload a local file to Aliyun OSS and return its public URL.
def upload_file(file):
    """Put *file* into the bucket under <YYYYmm>/<dd>/<basename> and return
    the resulting ALIOSS_HOST URL."""
    auth = oss2.Auth(config.ALIOSS_ID, config.ALIOSS_SECRET)
    bucket = oss2.Bucket(auth, config.ALIOSS_Endpoint, config.ALIOSS_BUCKET)

    now = time.localtime()
    object = "{}/{}/{}".format(
        time.strftime("%Y%m", now),   # year+month folder
        time.strftime("%d", now),     # day-of-month folder
        file.rsplit('/', 1)[1],       # original file name
    )
    bucket.put_object_from_file(object, file)
    return config.ALIOSS_HOST + '/' + object

#替换oss图片到正文
# Replace mmbiz-hosted images in the article HTML with OSS copies.
def replace_img(html, account):
    """Download every mmbiz.qpic.cn image referenced by data-croporisrc /
    data-src / src attributes, upload it to OSS, and rewrite the markup to
    point at the OSS copy."""
    patterns = (
        re.compile(r'data-croporisrc="(.*?)"'),
        re.compile(r'data-src="(.*?)"'),
        re.compile(r'src="(.*?)"'),
    )
    # Same candidate order as scanning each attribute kind in turn.
    candidates = []
    for patt in patterns:
        candidates.extend(patt.findall(html))
    img_lst = []
    for img_url in candidates:
        if "mmbiz.qpic.cn" not in img_url:
            continue
        data_src, img = download_img(img_url, account)
        oss_img = upload_file(data_src)
        img_lst.append([data_src, img])
        # Swap in the OSS URL and make lazy-loaded images render directly.
        html = html.replace(img_url, oss_img).replace("data-src=", "src=")
    return html

# Fetch an article page and extract/upload its embedded video.
def parse_video(url, account, title):
    """Return the OSS URL of the video found at *url* (None if none)."""
    page_html = str(requests.get(url).text)
    return download_media(page_html, account, title)

def main(account, m_id, begin, end=None):
    """Crawl every article/video of *account*, re-host media on OSS, and
    persist the records to MySQL, resuming from page *begin*."""
    cookie = get_cookie()
    login_url = get_login_url(cookie)
    # Parse the token parameter out of the logged-in session URL
    token = re.findall('token=(.*)', login_url)[0]
    search_page = get_search_page(cookie, token, account)
    # Parse the fakeid (account identifier) from the search results
    fakeid = json.loads(search_page).get('list')[0].get('fakeid')
    # Derive the exclusive upper page bound
    item = appmsg_dist(token, fakeid)
    end = get_end_count(cookie, item, end)
    for item in get_page_num(token, fakeid, m_id, int(begin), end):
        # Random delay between pages to avoid anti-crawler detection
        wait_time_arr = [3,5,6,7,8,9,10,15,20]
        choice_time = random.choice(wait_time_arr)
        time.sleep(choice_time)

        index_page = get_index_page(cookie, item)
        # NOTE(review): `dict` and `type` below shadow builtins — rename candidates
        for item0 in parse_index_page(index_page):
            dict = {}
            title = item0.get('title')
            link = item0.get('link')
            media_duration = item0.get('media_duration')
            # "0:00" duration means a plain article; anything else is a video
            if media_duration == "0:00":
                print("解析文章")
                content = parse_content(link, account)
                dict['content'] = content
                type = 'article'
            else:
                print("解析视频")
                video_url = parse_video(link, account, title)
                dict['video_url'] = video_url
                type = 'video'
            # Skip links already stored. NOTE(review): this dedup check runs
            # AFTER the expensive parse above — moving it first would save work
            result = get_mysql(link, type)
            if result:
                continue
            cover = item0.get('cover')
            print('cover: '+cover)
            if cover:
                # Re-host the cover image on OSS as well
                data_src, img = download_img(cover, account)
                cover = upload_file(data_src)
            publish_at = item0.get('create_time')
            digest = item0.get('digest')

            dict['link'] = link
            dict['account'] = account
            dict['title'] = title
            dict['cover'] = cover
            dict['publish_at'] = publish_at
            dict['created_at'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            dict['digest'] = digest
            dict['m_id'] = m_id
            # Persist the record
            save_mysql(dict, account)
# Build a Redis connection from the project config.
def redis_conn():
    """Return a redis.Redis client configured from config.redis_config."""
    cfg = config.redis_config
    return redis.Redis(host=cfg['host'], port=cfg['port'],
                       db=cfg['db'], password=cfg['password'])

# Return True when a record with this link already exists in MySQL.
def get_mysql(link, type):
    """Check the articles (type == 'article') or videos table for *link*.

    SECURITY: uses parameterized queries — the original interpolated the
    link straight into the SQL string, which was open to SQL injection.
    """
    conn = pymysql.connect(host=config.mysql_config['host'], user=config.mysql_config['user'],
                           password=config.mysql_config['password'], db=config.mysql_config['db'])
    try:
        with conn.cursor() as cur:
            if type == 'article':
                cur.execute("select * from ufutx_anchor_articles where link = %s", (link,))
            else:
                cur.execute("select * from ufutx_anchor_videos where link = %s", (link,))
            return cur.fetchone() is not None
    finally:
        # The original leaked the connection; always release it.
        conn.close()
# Insert one article/video record into MySQL.
def save_mysql(dict, account):
    """Insert the record held in *dict*; on failure roll back and remove the
    link from the account's Redis set so the item can be retried later."""
    dict['channel'] = "official"
    conn = pymysql.connect(host=config.mysql_config['host'], user=config.mysql_config['user'], password=config.mysql_config['password'], db=config.mysql_config['db'])
    cur = conn.cursor()
    try:
        # SECURITY: parameterized statements — the original formatted values
        # straight into the SQL string (injection-prone, and broken by any
        # quote character in a title or article body).
        if "content" in dict:
            sql = ("insert into ufutx_anchor_articles"
                   "(m_id, title, account, link, digest, cover, content, channel, publish_at, created_at) "
                   "values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
            payload_key = 'content'
        else:
            sql = ("insert into ufutx_anchor_videos"
                   "(m_id, title, account, link, digest, cover, video_url, channel, publish_at, created_at) "
                   "values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
            payload_key = 'video_url'
        cur.execute(sql, (dict['m_id'], dict['title'], dict['account'], dict['link'],
                          dict['digest'], dict['cover'], dict[payload_key],
                          dict['channel'], dict['publish_at'], dict['created_at']))
        conn.commit()
        print("插入数据库成功")
    except Exception as e:
        conn.rollback()
        # Drop the link from the account's Redis set so it gets retried.
        redis_con = redis_conn()
        redis_con.srem(account, dict['link'])
        print(e)
    cur.close()
    conn.close()
if __name__ == '__main__':
    r = redis_conn()
    # key = 'merchant:weixin'
    # res = r.rpop(key)
    account = ''
    # Hard-coded task payload; normally this would be popped from Redis (above)
    res = '{"m_id":"491","value":"福恋"}'
    if res:
        obj = json.loads(res)
        account = obj['value']
        m_id = obj['m_id']
    if account:
    # account_arr = ["禾禾哈熹","生命树婚恋","国际爱邻机构"]
    # for account in account_arr:
        print(f"{account} 爬虫开始")
        # Resume from the page get_page_num last recorded, else start at page 1
        merchant_begin_key = 'merchant:begin:{}'.format(m_id)
        begin = r.get(merchant_begin_key) if r.get(merchant_begin_key) else 1
        main(account, m_id, begin)
        print(f"{account} 爬虫结束")

