#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import urllib.request

from utils.weiboUtil import WeiboContent, weibo_util

# Proxy IP address used for every outgoing request
proxy_addr = "221.1.200.242:38652"

fun_nums = 15
motion_nums = 5

# Page-fetch helper: open a URL through the configured HTTP proxy.
def use_proxy(url, proxy_addr):
    """Fetch *url* via the HTTP proxy at *proxy_addr* and return the body.

    The response is decoded as UTF-8 with undecodable bytes ignored.
    The raw body is echoed to stdout for debugging (as before).
    """
    req = urllib.request.Request(url)
    # Spoof a desktop-browser User-Agent so m.weibo.cn serves the JSON API.
    req.add_header("User-Agent",
                   "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0")
    proxy = urllib.request.ProxyHandler({'http': proxy_addr})
    opener = urllib.request.build_opener(proxy, urllib.request.HTTPHandler)
    # Use the opener directly instead of install_opener(): the old code
    # mutated process-global urllib state on every single call.
    with opener.open(req) as resp:
        data = resp.read().decode('utf-8', 'ignore')
    print(data)
    return data


# Extract the containerid of the "weibo" tab from a profile-index response;
# this id is required when requesting a user's post feed.
def get_containerid(url):
    """Return the containerid of the 'weibo' tab for the profile at *url*.

    Returns None when no tab of type 'weibo' exists (the old version
    raised UnboundLocalError in that case).
    """
    data = use_proxy(url, proxy_addr)
    content = json.loads(data).get('data')
    containerid = None
    # Don't shadow `data` with the loop variable (old code did).
    for tab in content.get('tabsInfo').get('tabs'):
        if tab.get('tab_type') == 'weibo':
            containerid = tab.get('containerid')
            break
    return containerid


# Fetch and print a user's basic profile info: nickname, profile URL,
# avatar, follow/follower counts, gender, level, etc.
def get_userInfo(id):
    """Print basic profile information for the weibo user with uid *id*."""
    url = 'https://m.weibo.cn/api/container/getIndex?type=uid&value=' + id
    data = use_proxy(url, proxy_addr)
    content = json.loads(data).get('data')
    # Look the userInfo dict up once instead of once per field.
    user = content.get('userInfo')
    # Report format (field labels) is unchanged from the original.
    print("微博昵称：" + user.get('screen_name') + "\n"
          + "微博主页地址：" + user.get('profile_url') + "\n"
          + "微博头像地址：" + user.get('profile_image_url') + "\n"
          + "是否认证：" + str(user.get('verified')) + "\n"
          + "微博说明：" + user.get('description') + "\n"
          + "关注人数：" + str(user.get('follow_count')) + "\n"
          + "粉丝数：" + str(user.get('followers_count')) + "\n"
          + "性别：" + user.get('gender') + "\n"
          + "微博等级：" + str(user.get('urank')) + "\n")


# Crawl weibo posts: per-post content, detail-page URL, like / comment /
# repost counts, etc.
# mode 1: by blogger   mode 2: by search (search mode currently disabled)
# def get_weibo(id, usernum, contlist, titlelist,mode, search_cont):
def get_weibo(dict, contlist, titlelist, mode, search_cont):
    """Crawl posts for every blogger in *dict* ({uid: wanted-post-count}).

    Picture-div HTML is appended to *contlist* and post texts to
    *titlelist*.  *mode* and *search_cont* are reserved for the disabled
    search mode.  (NOTE: parameter name `dict` shadows the builtin but is
    kept for caller compatibility.)
    """
    # Like-count threshold; the actual filter is commented out downstream.
    goodNums = 150

    # Weibo search mode (disabled)
    # if(mode is not None and search_cont is not None and mode == 2):
    #    weibo_url = weiboSearchUrl + search_cont

    enable_nums = 0
    # Crawl each blogger in turn.
    for id, nums in dict.items():
        # Cumulative target: contlist already holds earlier bloggers' posts,
        # and get_weibo_to_cont compares against len(contlist).
        enable_nums += nums
        # Reset paging per blogger (the old shared counter made blogger B
        # start on page 2 whenever blogger A had needed a retry).
        page = 1
        url = 'https://m.weibo.cn/api/container/getIndex?type=uid&value=' + id
        # Fetch the containerid once per blogger (old code fetched it twice).
        containerid = get_containerid(url)
        weibo_url = url + '&containerid=' + containerid + '&page=' + str(page)
        get_userInfo(id)
        is_success = get_weibo_to_cont(weibo_url, enable_nums, goodNums, page, contlist, titlelist)
        # Strict False test: the helper returns None on empty/exception,
        # and only an explicit False means "page exhausted, try the next".
        if is_success is False:
            page += 1
            weibo_url = url + '&containerid=' + containerid + '&page=' + str(page)
            # Pass the cumulative target (the old code passed the
            # per-blogger count, undercounting after the first blogger).
            get_weibo_to_cont(weibo_url, enable_nums, goodNums, page, contlist, titlelist)

# Crawl one page of the daily "funny" channel feed.
def get_daily_funny(contlist, titlelist):
    """Collect picture posts from the funny channel into the two lists.

    Passing None as the quota lets the helper take every card on the page.
    """
    like_threshold = 150
    channel_url = ("https://m.weibo.cn/api/container/getIndex"
                   "?containerid=102803_ctg1_4388_-_ctg1_4388&openApp=0")
    get_weibo_to_cont(channel_url, None, like_threshold, 1, contlist, titlelist)

# Crawl one page of the daily "inspirational" channel feed.
def get_daily_motion(contlist, titlelist):
    """Collect up to motion_nums picture posts from the channel."""
    like_threshold = 150
    channel_url = ("https://m.weibo.cn/api/container/getIndex"
                   "?containerid=102803_ctg1_1988_-_ctg1_1988&openApp=0")
    get_weibo_to_cont(channel_url, motion_nums, like_threshold, 1, contlist, titlelist)

def get_weibo_to_cont(weibo_url,nums,goodNums,page,contlist,titlelist):
    """Fetch one feed page and harvest picture posts into the lists.

    For every type-9 card that has pictures, an HTML <div> of its images
    is appended to *contlist* and its text to *titlelist*.  Text-only
    posts are skipped.  *nums* is the target length of *contlist*
    (None means "take the whole page"); *goodNums* is a like-count
    threshold whose filter is currently commented out; *page* is only
    used in log output.

    Returns True when the target count is reached, False when the page
    is exhausted first (caller should fetch the next page), and None
    when *cards* is empty or an exception is swallowed below.
    """
    data = use_proxy(weibo_url, proxy_addr)
    content = json.loads(data).get('data')
    cards = content.get('cards')
    # No explicit quota: take as many posts as this page holds.
    if(nums is None):
        nums = len(cards)
    try:
        if (len(cards) > 0):
            # Walk the cards until the quota is met or the page runs out.
            for j in range(len(cards)):
                # Target count reached -> success.
                if(len(contlist) == nums):
                    return True
                # Last card of the page -> signal the caller to paginate.
                # NOTE(review): this returns before processing the final
                # card, so the last card of every page is skipped — confirm
                # whether that is intentional.
                if(j == len(cards) - 1):
                    return False

                print("-----正在爬取第" + str(page) + "页，第" + str(j) + "条微博------")
                card_type = cards[j].get('card_type')
                if (card_type == 9):
                    mblog = cards[j].get('mblog')
                    # Like count
                    attitudes_count = mblog.get('attitudes_count')
                    # Comment count
                    comments_count = mblog.get('comments_count')
                    # Publication time
                    created_at = mblog.get('created_at')
                    # Repost count
                    reposts_count = mblog.get('reposts_count')
                    # Post detail-page URL
                    scheme = cards[j].get('scheme')
                    text = mblog.get('text')

                    # Save the post to mysql (disabled)
                    # weibo_cont = WeiboContent(text, None, None, comments_count, None, attitudes_count)
                    # res = weibo_util.post_weibo(weibo_cont)
                    # Get the weibo id
                    # weibo_id = res["data"]

                    # Changed in V1.0.1:
                    # only posts with more than `goodNums` likes counted as
                    # valid data (filter currently disabled)
                    # if(attitudes_count < goodNums):
                    #      continue
                    # Crawl the post's pictures.
                    pics = mblog.get('pics')
                    finalPicDiv = ""
                    if pics is not None and len(pics) > 0:
                        for index in range(0, len(pics)):
                            pic = pics[index]
                            picurl = pic.get('url')
                            print('pic:' + picurl)
                            # Animated images not supported yet; swap the
                            # thumbnail size marker for the full-size one.
                            picurl = str(picurl).replace("orj360","large")

                            picDiv = "<div class='pic-margin'><img src=" + picurl + "/></div>"
                            finalPicDiv += picDiv
                    else:
                        # Keep scanning until a post with pictures is found.
                        continue

                    # Collected picture group for this post.
                    contlist.append(finalPicDiv)
                    # Matching title for the group.
                    titlelist.append(text)

                    '''
                    with open(file, 'a', encoding='utf-8') as fh:
                        fh.write("----第" + str(page) + "页，第" + str(j) + "条微博----" + "\n")
                        fh.write("微博地址：" + str(scheme) + "\n" + "发布时间：" + str(
                            created_at) + "\n" + "微博内容：" + text + "\n" + "点赞数：" + str(
                            attitudes_count) + "\n" + "评论数：" + str(comments_count) + "\n" + "转发数：" + str(
                            reposts_count) + "\n")
                    '''
    # NOTE(review): broad except that only prints — malformed responses
    # are silently dropped and the function falls through returning None.
    except Exception as e:
        print(e)
        pass


# Splice the template segments with the crawled content to build the
# final output page.
def get_final_output(file,temlist,conlist,titlelist):
    """Merge template segments with crawled titles/content, write to *file*.

    Segment i has its '标题<i+1>' placeholder replaced by titlelist[i] and
    is followed by conlist[i]; extra template segments are dropped.
    """
    pieces = []
    for idx, segment in enumerate(temlist):
        # Stop once the crawled content runs out.
        if idx >= len(conlist):
            break
        placeholder = '标题' + str(idx + 1)
        pieces.append(segment.replace(placeholder, titlelist[idx]) + conlist[idx])
    write_file(file, "".join(pieces))

# Read the HTML template and split it into segments.
def read_template_2_list(file):
    """Read template *file* (UTF-8) and return it split on 'placeholder'.

    Uses a context manager so the handle is always closed (the old
    version leaked it).  The raw template is echoed to stdout, as before.
    """
    with open(file, 'r', encoding="utf-8") as htmlf:
        htmlcont = htmlf.read()
    print(htmlcont)
    return htmlcont.split('placeholder')

# Write to a file, truncating existing content; the file is created if
# it does not exist.
def write_file(file,cont):
    """Overwrite *file* with *cont*, encoded as UTF-8.

    The context manager guarantees the handle is closed even if the
    write raises (the old open/close pair could leak it).
    """
    with open(file, "w", encoding="utf-8") as fo:
        fo.write(cont)

if __name__ == "__main__":
    # Candidate bloggers kept for reference (name, uid, quota):
    #   假装在采访 5095435677 20 | 史上第一最最搞 1134796120 10
    #   如厕必备读物 2322168320 10 | 差评君 互联网资讯 5734325998 5
    #   趣闻搞笑 2396658275 | 搞笑公 2287235162 | 治愈系心理学 1097201945
    #   PoemsForYou 知名人文艺术博主 5668580668
    # blogger_quota = {'6088583586': 10, '2287235162': 6, '1097201945': 4, '2322168320': 3}
    blogger_quota = {'6088583586': 10}

    template_path = "static/template2.html"
    output_path = "static/output.html"
    segments = read_template_2_list(template_path)
    # Accumulators shared with the crawl helpers (closure-style storage).
    picture_divs = []
    post_titles = []
    get_weibo(blogger_quota, picture_divs, post_titles, None, None)
    # get_daily_funny(picture_divs, post_titles)
    # get_final_output(output_path, segments, picture_divs, post_titles)
