# -*- coding: utf-8 -*-
# au: weiran
# 20171210
import urllib.request
import re
import random
import pymysql
# import scrapy
from lxml import etree
import http.cookiejar
import os
import time
import datetime
import urllib.error


import numpy as np
import pandas as pda
import matplotlib.pylab as pyl
import jieba
from PIL import Image
import wordcloud as wc

######################## Part 1: crawl the Hexun blog data
# Module-level MySQL connection, shared by every insertMySql() call.
# NOTE(review): user/password are placeholders — set real credentials before running.
db = pymysql.connect(host="localhost", user="<your name>", password="<your password>", db="csdnstudy",
                             charset="utf8")
# Article ids seen so far; getOnePersonSinglePage() appends to it to skip
# duplicates, and getOnePersonMultiPage() stops once it exceeds 10000 items.
g_aidLst = []

def buildOpener():
    """Build a urllib opener with a randomly chosen User-Agent header.

    Returns:
        urllib.request.OpenerDirector whose only extra header is User-Agent.
    """
    # Rotating user agents lowers the chance of the crawler being blocked.
    # FIXED: the Firefox entry used to carry a spurious "User-Agent:" prefix
    # inside the header *value* itself; it has been removed.
    uapools = (
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
        "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 ",
    )
    thisua = random.choice(uapools)
    print("=" * 50)
    print(thisua)
    opener = urllib.request.build_opener()
    opener.addheaders = [("User-Agent", thisua)]
    return opener

def insertMySql(sql):
    """Execute *sql* on the module-level connection and commit immediately.

    Args:
        sql: a complete SQL statement, already fully formatted by the caller.
    """
    global db
    # Commit per statement so a crash mid-crawl loses at most the current row.
    db.query(sql)
    db.commit()



# class Article():
    # def __init__(self):
        # self.m_uaid = ""
        # self.m_caid = ""
        # self.m_atitle = ""
        # self.m_aurl = ""
        # self.m_aclick = -1
        # self.m_acomment = -1


# Fetch one listing page of a single author's blog
def _fetchWithRetry(url, retries=5):
    """Download *url* decoded as gb2312, retrying on transient errors.

    Replaces the original unbounded `while True` retry loops, which could
    hang the crawler forever on a dead URL.

    Returns:
        The decoded page text, or None after *retries* consecutive failures.
    """
    for _ in range(retries):
        try:
            return urllib.request.urlopen(url).read().decode("gb2312", 'ignore')
        except Exception as err:
            print(err)
            time.sleep(1)  # brief pause before the next attempt
    return None


def getOnePersonSinglePage(mainurl):
    """Crawl one listing page of a hexun blog and insert each article row.

    Args:
        mainurl: listing-page URL such as http://<id>.blog.hexun.com/p2/default.html

    Returns:
        0 on success; -1 when the page yields no articles (end of the
        author's pagination) or a required download keeps failing.
    """
    global g_aidLst
    urldata = _fetchWithRetry(mainurl)
    if urldata is None:
        return -1
    print(len(urldata))

    # The click/comment counters live in a separate packed JS file; its URL
    # also encodes every article id on this page ("...=111-222-333").
    packpat = r'<script type="text/javascript" src="(.*?)"></script>'
    packurlLst = re.compile(packpat, re.S).findall(urldata)
    if 0 == len(packurlLst):
        return -1
    packurl = packurlLst[0]
    print(packurl)

    aIdLst = packurl.split('=')[-1].split('-')
    if 1 == len(aIdLst):
        # No article ids in the pack URL: we are past the last page.
        return -1
    print(aIdLst)

    packdata = _fetchWithRetry(packurl)
    if packdata is None:
        return -1
    print(packdata)

    for aId in aIdLst:
        if aId in g_aidLst:
            continue
        g_aidLst.append(aId)

        # Dots escaped so "." cannot match arbitrary characters in the host.
        pat1 = r"</span><a href='(http://[a-zA-Z0-9]*?\.blog\.hexun\.com/" + str(aId) + r"_d\.html)'>(.*?)</a></span>"
        rstLst = re.compile(pat1, re.S).findall(urldata)
        if not rstLst:
            # Listing markup did not match this id; skip instead of crashing
            # on rstLst[0] below.
            continue

        pat2 = r"'click" + str(aId) + "','(.*?)'"
        pat3 = r"'comment" + str(aId) + "','(.*?)'"
        try:
            click = re.compile(pat2, re.S).findall(packdata)[0]
            comment = re.compile(pat3, re.S).findall(packdata)[0]
        except Exception as err:
            print(err)
            return -1

        aurl, atitle = rstLst[0]  # regex group 1 = article URL, group 2 = title
        print(aurl + ' ' + atitle + ' ' + str(click) + ' ' + str(comment))

        # FIXED: the original INSERT wrote the URL into `atitle` and the
        # title into `aurl` (values were in regex-group order, not column order).
        # HACK: values are spliced into the SQL string; a title containing a
        # double quote breaks the statement (injection risk) — parameterized
        # queries would require changing the insertMySql() interface.
        sql = 'insert into hexunFinanceBlog(aid, atitle, aurl, aclick, acomment) values("%s", "%s", "%s", "%s", "%s")' \
              % (str(aId), atitle, aurl, str(click), str(comment))
        insertMySql(sql)
    return 0

# Crawl every listing page of one author's blog
def getOnePersonMultiPage(auId, startPage=138, maxItems=10000):
    """Crawl listing pages of one author's blog until exhausted or capped.

    Args:
        auId: author id (the subdomain of <auId>.blog.hexun.com).
        startPage: first listing page to fetch.  Defaults to 138, the
            original hard-coded resume point — pass 1 for a full crawl.
        maxItems: stop once more than this many distinct article ids have
            been collected globally.

    Returns:
        1 when the global item cap was exceeded, 0 when the author's
        pages ran out.
    """
    global g_aidLst
    opener = buildOpener()
    urllib.request.install_opener(opener)
    page = startPage
    while True:
        curDate = datetime.datetime.now().strftime('%Y%m%d %H:%M:%S')
        print('-' * 15 + 'page: ' + str(page) + ' Current time: ' + curDate + ' total items: ' + str(len(g_aidLst)))
        mainurl = f"http://{auId}.blog.hexun.com/p{page}/default.html"
        rst = getOnePersonSinglePage(mainurl)
        if (-1 == rst):
            break
        if (maxItems < len(g_aidLst)):
            return 1
        page += 1
    return 0

# Crawling starts here: collect the ids of all listed authors
def getFinaceAuthorId():
    """Collect unique author ids from the finance index page and crawl each.

    Returns:
        1 as soon as any author crawl reports the global item cap was
        reached, otherwise 0 after every author has been processed.
    """
    urllib.request.install_opener(buildOpener())

    finaceUrl = "http://f.blog.hexun.com/"
    urldata = urllib.request.urlopen(finaceUrl).read().decode("gb2312", 'ignore')

    # Profile links look like http://hexun.com/<numeric id>/default.html
    aupat = r'<a.*?href="(http://hexun.com/[0-9]*?/default.html)">.*?</a><span>'
    auAllLst = re.compile(aupat).findall(urldata)
    print(auAllLst)

    # De-duplicate while preserving first-seen order.
    auIdLst = []
    seen = set()
    for auIdUrl in auAllLst:
        auId = auIdUrl.split('/')[-2]
        if auId not in seen:
            seen.add(auId)
            auIdLst.append(auId)

    for auId in auIdLst:
        print('=' * 50)
        print(str(auId))
        if 1 == getOnePersonMultiPage(auId):
            print("Ok ,we got more than 10000 items!")
            return 1
    return 0

################################################################
## Part 2: analyze the data
################################################################

# Word-cloud analysis
def wordcloudAnalysis(sqldata):
    """Build, show, and save a word cloud from the crawled text column.

    Args:
        sqldata: DataFrame read from hexunFinanceBlog (columns include
            'atitle' and 'aurl').

    Side effects: saves ./financeWC.jpg and opens a matplotlib window;
    requires ./mask2.png and the SimHei font file to exist.

    NOTE(review): this reads 'aurl', which — given the crawler's swapped
    INSERT in getOnePersonSinglePage — actually held the article *titles*;
    if that INSERT is fixed, switch this column to 'atitle'.
    """
    # join() replaces the original quadratic += concatenation.
    allTitle = '\n'.join(sqldata['aurl']) + '\n'

    # Segment the Chinese text and space-separate the tokens, the format
    # WordCloud.generate() expects.
    allTitles = ' '.join(str(word) for word in jieba.cut(allTitle)) + ' '

    pfont = r'C:/Windows/Fonts/simhei.ttf'  # SimHei: a font with CJK glyphs
    mask = Image.open("./mask2.png")  # silhouette image shaping the cloud
    arrmask = np.array(mask)
    financeWC = wc.WordCloud(collocations=False, font_path=pfont, mask=arrmask, background_color="white").generate(allTitles)
    pyl.imshow(financeWC)
    pyl.savefig("./financeWC.jpg")
    pyl.show()

# Remove abnormal rows, then plot distribution charts
def histAnalysis(sqldata):
    """Drop outlier rows, then plot click/comment scatters and histograms.

    Args:
        sqldata: DataFrame from hexunFinanceBlog; clicks are at positional
            column 4 and comments at column 5 (the table apparently carries
            a leading id column — TODO confirm against the schema).

    Side effects: draws a 2x2 matplotlib figure and saves ./histAnalysis.jpg.
    """
    # Scatter of the raw data before filtering.
    tsqldata = sqldata.T
    clickLst = tsqldata.values[4]
    commentLst = tsqldata.values[5]
    pyl.subplot(2, 2, 1)
    pyl.plot(clickLst, commentLst, 'og')

    arrdata = sqldata.values

    # Thresholds are deliberately generous; the data is mostly normal and
    # the caps exist to demonstrate the filtering step.
    maxClick = 33000
    maxComment = 300

    # Keep a row only when clicks and comments are within bounds and the
    # article does not have more comments than clicks.
    # This list build replaces the original per-row np.row_stack accumulation,
    # which was O(n^2) and seeded `newdata` with a *class* object when no row
    # survived (row_stack is also deprecated in modern NumPy).
    keptRows = [
        row for row in arrdata
        if row[4] <= maxClick and row[5] <= maxComment and row[5] <= row[4]
    ]
    print('New array nums: ' + str(len(keptRows)))
    if not keptRows:
        # Original code crashed here (newdata held a type, not an array).
        print('No rows survived filtering; skipping plots.')
        return
    newdata = np.array(keptRows)

    tarrdata = newdata.T
    clickLst2 = tarrdata[4]
    commentLst2 = tarrdata[5]

    pyl.subplot(2, 2, 2)
    pyl.plot(clickLst2, commentLst2, 'o')  # distribution after filtering

    clickmax = tarrdata[4].max()
    clickmin = tarrdata[4].min()
    commentmax = tarrdata[5].max()
    commentmin = tarrdata[5].min()

    # 13 equal-width bins across each observed range.
    clickdst = (clickmax - clickmin) / 13
    commentdst = (commentmax - commentmin) / 13

    clicksty = np.arange(clickmin, clickmax, clickdst)
    pyl.subplot(2, 2, 3)
    pyl.hist(tarrdata[4], clicksty)

    commentsty = np.arange(commentmin, commentmax, commentdst)
    pyl.subplot(2, 2, 4)
    pyl.hist(tarrdata[5], commentsty)

    pyl.savefig('./histAnalysis.jpg')
    pyl.show()

# Data analysis starts here
def analysisHexun():
    """Load crawled rows from MySQL, clean the click column, then run the
    word-cloud and histogram analyses.

    Side effects: prints summaries; wordcloudAnalysis/histAnalysis save
    image files and open matplotlib windows.
    """
    db = pymysql.connect(host="localhost", user="<your name>", password="<your password>", db="csdnstudy",
                         charset="utf8")
    try:
        sql = "select * from hexunFinanceBlog"
        sqldata = pda.read_sql(sql, db)
    finally:
        db.close()  # FIXED: the original never closed this connection
    print(sqldata.head())
    print(sqldata.describe())

    # Pretreatment: a click count of 0 is treated as a failed scrape.
    # .loc replaces the original chained-indexing assignment, which pandas
    # may silently apply to a copy (SettingWithCopy).
    sqldata.loc[sqldata['aclick'] == 0, 'aclick'] = None
    nWrongItems = int(sqldata['aclick'].isnull().sum())
    # Median of the remaining (non-null) values, matching the original logic.
    sqldata['aclick'] = sqldata['aclick'].fillna(sqldata['aclick'].median())

    print("we had deal abnormal items: " + str(nWrongItems))

    wordcloudAnalysis(sqldata)

    histAnalysis(sqldata)


if __name__ == '__main__':
    # 1. Crawl module (disabled; uncomment to re-crawl the blog data):
    # getFinaceAuthorId()
    # getOnePersonMultiPage()
    # db.close()

    ##########################
    # 2. Analysis module
    analysisHexun()

    print("All done 感谢您的运行 1210")

