#!/usr/bin/env python  
# encoding: utf-8   

""" 
@version: v1.0 
@author: null 
@software: PyCharm 
@file: sina_main.py 
@time: 2017.3.5 14:22 
"""

import sina_login_direct as sina
import codecs
import csv
import re

import jieba.analyse
import matplotlib.pyplot as plt
import requests
from scipy.misc import imread
from wordcloud import WordCloud

import math
import time


def fetch_weibo(cookie):
    """Yield the cleaned text of every weibo on the logged-in user's own timeline.

    :param cookie: dict of session cookies obtained from a successful login.
    :yields: each non-empty weibo text, stripped of markup/noise by cleanring().
    """
    cards_api = "http://m.weibo.cn/index/my?format=cards"
    cards_page_api = "http://m.weibo.cn/index/my?format=cards&page=%s"

    # Probe request: only used to learn how many pages exist.
    response = requests.get(url=cards_api, cookies=cookie)
    data = response.json()[0]
    # Guard against a missing field so range() below cannot blow up on None.
    maxPage = data.get("maxPage") or 0
    print("一共", maxPage, "页微博")

    # Pages are numbered 1..maxPage; range(1, maxPage) used to skip the last page.
    for i in range(1, maxPage + 1):
        response = requests.get(url=cards_page_api % i, cookies=cookie)
        data = response.json()[0]
        print('正在爬第', i, '页')
        groups = data.get("card_group") or []
        for group in groups:
            # Non-status cards may lack "mblog" (or its "text") — skip them
            # instead of raising AttributeError.
            mblog = group.get("mblog") or {}
            text = cleanring(mblog.get("text") or "").strip()
            if text:
                yield text


def write_csv(texts, filename):
    """Write each item of *texts* as one row of ./<filename>.csv (single "text" column).

    Uses built-in open() with newline='' — the csv module requires it; the old
    codecs.open() text stream produced doubled newlines (blank rows) on Windows.
    """
    with open('./' + filename + '.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=["text"])
        writer.writeheader()
        for text in texts:
            writer.writerow({"text": text})


def read_csv(filename):
    """Yield the "text" column of every row in ./<filename>.csv.

    Opens with newline='' as the csv module requires, so quoted fields that
    contain embedded newlines are parsed correctly (codecs.open was not).
    """
    with open('./' + filename + '.csv', 'r', encoding='utf-8', newline='') as f:
        reader = csv.DictReader(f)
        for row in reader:
            yield row['text']

def word_segment(texts):
    """For each text, yield its top-20 jieba keywords joined by spaces.

    Stop words are loaded from ./stopwords.txt before the first extraction.
    """
    jieba.analyse.set_stop_words("./stopwords.txt")
    yield from (" ".join(jieba.analyse.extract_tags(sentence, topK=20))
                for sentence in texts)

def generate_img(texts, srcfile, dstfile):
    """Render a word cloud of *texts*, shaped by the mask image ./<srcfile>,
    and save it as ./IMG_<dstfile>.jpg.

    :param texts: iterable of space-separated keyword strings.
    :param srcfile: mask image file name (white areas are excluded).
    :param dstfile: suffix for the output image name.
    """
    data = " ".join(texts)

    # scipy.misc.imread was removed in SciPy 1.2; matplotlib's imread (already
    # imported as plt) is a drop-in replacement here.
    mask_img = plt.imread('./' + srcfile)
    if mask_img.ndim == 3:
        # Collapse RGB(A) to one grayscale channel, mimicking the old
        # flatten=True (plain mean instead of luminance weights — for a
        # white-background mask the difference is irrelevant).
        mask_img = mask_img[..., :3].mean(axis=2)
    if mask_img.dtype.kind == 'f':
        # plt.imread yields floats in [0, 1] for PNG; WordCloud compares mask
        # pixels against 255, so rescale to the 0-255 integer range.
        mask_img = (mask_img * 255).astype('uint8')

    wordcloud = WordCloud(font_path='STKAITI.TTF',
                          background_color='white',
                          mask=mask_img).generate(data)
    plt.imshow(wordcloud)
    plt.axis('off')
    plt.savefig('./IMG_' + dstfile + '.jpg', dpi=600)

def third_fetch_weibo(cookie, name):
    """Yield the cleaned text of another user's weibos via the m.weibo.cn container API.

    :param cookie: dict of session cookies from a successful login.
    :param name: weibo screen name to search for (the uid/containerid are hard-coded).
    :yields: each non-empty status text (card_type == 9), cleaned by cleanring().
    """
    proxies = {
        'http': 'http://121.232.145.242:9000'
    }
    third_api = 'http://m.weibo.cn/container/getIndex?uid=1640571365&luicode=10000011&lfid=100103type=3&q=' + name + '&type=uid&value=1640571365&containerid=1076031640571365'
    third_card_api = third_api + '&page=%s'
    # The probe request only needs the total count, so use the URL WITHOUT the
    # page placeholder (the original sent a literal "&page=%s" to the server).
    response = requests.get(url=third_api, cookies=cookie)
    try:
        data = response.json()
    except ValueError:
        # Body was not JSON — typically an anti-crawler block page.
        print('解析错误，也许IP被封了')
        return
    maxNum = data.get("cardlistInfo").get("total")
    print("一共", maxNum, "条微博")
    maxPage = math.ceil(maxNum / 10)
    print("一共", maxPage, "页微博")
    # input() returns a str; the original crashed with range(str, int) below.
    startNum = int(input('从第几页开始'))
    # Pages run 1..maxPage, so the end bound must be maxPage + 1.
    for i in range(startNum, maxPage + 1):
        response = requests.get(url=third_card_api % i, cookies=cookie, proxies=proxies)
        data = response.json()
        print('正在爬第', i, '页')
        groups = data.get("cards") or []
        for group in groups:
            if group.get("card_type") == 9:
                # Guard against a card without "mblog"/"text".
                mblog = group.get("mblog") or {}
                text = cleanring(mblog.get("text") or "").strip()
                if text:
                    yield text
        # Throttle to reduce the chance of the IP being banned.
        time.sleep(2)


def cleanring(content):
    """Strip boilerplate markup and noise tokens from a weibo text.

    Removes anchor/icon/span tags, repost markers and a handful of Chinese
    punctuation characters, returning the remaining text unchanged.
    """
    noise = re.compile(
        "<a .*?/a>|<i .*?/i>|转发微博|//:|Repost|，|？|。|、|分享图片|<span .*?/span>"
    )
    return noise.sub("", content)

if __name__ == "__main__":
    # Log in first: every API below needs an authenticated cookie dict.
    account = input('请输入微博账号:')
    password = input('请输入微博密码:')
    session, weiboName = sina.login(account, password)
    cookie = requests.utils.dict_from_cookiejar(session.cookies)

    choice = input('1.爬自己的微博\n2.爬别人的微博\n')
    if choice == '1':
        # Own timeline: crawl -> CSV -> segment -> word cloud.
        write_csv(fetch_weibo(cookie), weiboName)
        generate_img(word_segment(read_csv(weiboName)), 'heart-mask', weiboName)
    elif choice == '2':
        # Someone else's timeline, identified by the entered ID.
        target = input('请输入微博ID:')
        write_csv(third_fetch_weibo(cookie, target), target)
        generate_img(word_segment(read_csv(target)), 'tt.png', target)
