import xlrd
import pandas as pd
import numpy as np
from pyecharts.charts import Bar, Grid, Line, Liquid, Page, Pie,WordCloud
from pyecharts.globals import SymbolType
from pyecharts import options as opts
import collections
from collections import Counter

# Book category labels exactly as they appear in the workbook's '图书类别'
# index column (readData selects rows with .loc on these labels); the list
# order also fixes the order of every parallel per-category list in __main__.
catalog=['社会小说','情感小说','青春文学','科幻经典','战争军旅','玄幻精品','恐怖惊悚','悬疑推理','世界名著','外国文学','武侠经典']
def readData(dataPath, colname):
    """Read all rows of one book category from the Excel workbook.

    Parameters
    ----------
    dataPath : str
        Path to the .xls workbook; its '图书类别' column is used as the index.
    colname : str
        Category label to select (one of the entries in ``catalog``).

    Returns
    -------
    list
        ``DataFrame.loc[colname].values.tolist()`` — a list of row lists when
        several rows share the label, a flat list for a single row.

    The result is printed before returning (kept from the original as a
    progress/debug aid for this script).
    """
    data = pd.read_excel(dataPath, index_col='图书类别')
    rows = data.loc[colname]
    result = rows.values.tolist()
    print(result)
    return result
def getBar(list_x, list_y,
           title="微信读书各类书籍前20平均推荐度柱状图",
           series_name="平均推荐指数",
           color='palevioletred',
           out_path="percent.html"):
    """Render a single-series bar chart to an HTML file.

    Parameters
    ----------
    list_x : list
        Category labels for the x axis.
    list_y : list
        One numeric value per x label.
    title, series_name, color, out_path :
        Chart title, series legend name, bar color and output HTML path.
        Defaults reproduce the original hard-coded behavior, so existing
        two-argument calls are unchanged.

    Returns
    -------
    pyecharts.charts.Bar
        The configured chart (also written to ``out_path``).
    """
    bar = Bar()
    bar.set_global_opts(title_opts=opts.TitleOpts(title=title))
    bar.add_xaxis(list_x)
    bar.add_yaxis(series_name, list_y, color=color)
    bar.render(out_path)
    return bar

if __name__=="__main__":

    dataPath="D:/PythonSpace/bookData/wxbooks.xls"

    # Load (and print, inside readData) the top-20 rows of every category.
    # The aggregate lists below were precomputed from these sheets; the reads
    # are kept for their console-output side effect.
    category_data = [readData(dataPath, name) for name in catalog]

    # Precomputed per-category aggregates over each category's top 20 books,
    # in the same order as `catalog`:
    #   ave_percent   - average recommendation percentage
    #   ave_numReader - average reader count
    #   movie_num     - number of books adapted for film/TV
    ave_percent=[86.0, 85.0, 83.0, 86.0, 77.0, 86.0, 77.0, 84.0, 89.0, 83.0, 85.0]
    ave_numReader=[2981, 891, 178, 637, 70, 880, 511, 1230, 1464, 1022, 1025]
    movie_num=[14, 4, 9, 8, 11, 13, 2, 9, 15, 8, 8]

    # Author names from each category's top-20 list, ordered as `catalog`.
    # Duplicates are meaningful: their multiplicity drives word-cloud weights.
    authors_by_category = [
        ['莫言', '麦家', '莫言', '钱锺书', '余华', '阎真', '王小波', '林奕含', '林奕含', '乔治·奥威尔', '弗雷德里克·巴克曼', '弗雷德里克·巴克曼', '刘震云', '小川糸', '毛姆', '刘震云', '赫尔曼·黑塞', '贾平凹', '余华', '余华'],
        ['张嘉佳', '桐华', '酒小七', '桃桃一轮', '辛夷坞', '张嘉佳', '阿琐', '顾漫', '阿琐', '张嘉佳', '阿琐', '酒小七', '柏林石匠', '桐华', '东奔西顾', '书海沧生', '穿行四季', '白饭如霜', '顾漫', '乔一'],
        ['八月长安', '狄戈', '八月长安', '烽火戏诸侯', '景行', '夜黑羽', '顾漫', '酒小七', '小北233', '八月长安', '辛夷坞', '苏幸安', '苏幸安', '两色风景', '辛夷坞', '绿亦歌', '知稔', '韩寒', '匪我思存'],
        ['刘慈欣', '丹尼尔·凯斯', '米歇·戴斯玛克特', '弗兰克·赫伯特', '刘慈欣', '刘慈欣', '安迪·威尔', '艾萨克·阿西莫夫', '石黑一雄', '斯坦尼斯瓦夫·莱姆', '艾萨克·阿西莫夫', '刘慈欣', '刘慈欣', '刘慈欣', '田中芳树', '刘慈欣', '阿瑟·克拉克', '刘慈欣', '菲利普·迪克', '丹·西蒙斯'],
        ['埃里希·玛丽亚·雷马克', '刘猛', '周健良', '梅雨情歌', '南派三叔 乾坤', '刘猛', '高铭', '未上膛的子弹', '尚武', '梅雨情歌', '李枭', '汤姆·克兰西', '李建林', '梅雨情歌', '黄贺', '梅雨情歌', '魏笑宇', '尚武'],
        ['烽火戏诸侯', '江南', '江南', '白姬绾', '烽火戏诸侯', '裟椤双树', '安杰伊·萨普科夫斯基', '佟婕', '沧月', '夏忆', '凤歌', '说不得大师', '江南', '周木楠', 'J.R.R.托尔金', '烟雨江南', '沧月', '裟椤双树', '夜黑羽'],
        ['小叙', '骑马钓鱼', '莫言', '李诣凡', '南无袈裟理科佛', '潘海根', '老八零2', '南无袈裟理科佛', '微凉维夏', '马六甲_', '锦若', '夏忆', '老柒哥', '桐木 贺宝栋等', '南无袈裟理科佛', '宇文长弓', '南无袈裟理科佛', '奔放的程序员', '丹·布朗', '夜黑羽'],
        ['小桥老树', '桃花渡', '叶真中显', '耳东水寿', '高野和明', '柯南·道尔', '阿加莎·克里斯蒂', '耳东水寿', '蜘蛛', '何马', '法医秦明', '雷米', '南无袈裟理科佛', '紫金陈', '马伯庸', '常书欣', '法医秦明', '罗伯特·麦卡蒙', '江南道长', '何马'],
        ['哈珀·李', '大仲马', '毛姆', '陀思妥耶夫斯基', '钱钟书', '简·奥斯丁', '列夫·托尔斯泰', '玛格丽特·米切尔', '陀思妥耶夫斯基', '雨果', '奥斯特洛夫斯基', '蒙哥马利', '福楼拜', '奥尔科特', '大仲马', '司汤达', '毛姆', '凡尔纳', '夏洛蒂·勃朗特', '列夫·托尔斯泰'],
        ['米兰·昆德拉', '卡勒德·胡赛尼', '毛姆', '毛姆', '弗雷德里克·巴克曼', '村上春树', '中岛敦', '罗伯特·M.波西格', '埃莱娜·费兰特', '迪莉娅·欧文斯', '弗朗西斯·司各特·菲茨杰拉德', '蕾秋·乔伊斯', '米哈依尔·肖洛霍夫', '雷蒙德·钱德勒', '加缪', '伍绮诗', '毛姆', '弗雷德里克·巴克曼', '小川糸', '毛姆'],
        ['黄易', '黄易', '古龙', '黄易', '黄易', '梁羽生', '古龙', '古龙', '凤歌', '藤萍', '古龙', '古龙'],
    ]

    # Rank authors by how often they appear in each category's top 20.
    # Counter(xs).most_common() is equivalent to the original
    # Counter(dict(collections.Counter(xs))).most_common() — same counts,
    # same first-occurrence tie order — and (word, weight) pairs are exactly
    # the seed format WordCloud.add() accepts.
    author_rankings = []
    for authors in authors_by_category:
        ranking = Counter(authors).most_common()
        print(ranking)
        author_rankings.append(ranking)

    # Charts below are configured but not rendered here (their render calls
    # were disabled in the original); only the word-cloud page is written.
    bar = Bar()
    bar.set_global_opts(title_opts=opts.TitleOpts(title="微信读书各类书籍前20平均推荐度柱状图"))
    bar.add_xaxis(catalog)
    bar.add_yaxis("平均推荐指数", ave_percent, color='palevioletred')

    bar1 = Bar()
    bar1.set_global_opts(title_opts=opts.TitleOpts(title="微信读书各类书籍前20平均阅读人数柱状图"))
    bar1.add_xaxis(catalog)
    bar1.add_yaxis("平均阅读人数", ave_numReader, color='pink')

    p1 = Pie()
    p1.set_global_opts(title_opts=opts.TitleOpts(title="微信读书各类前20中改编为影视作品的书籍数量"))
    p1.add("", [list(z) for z in zip(catalog, movie_num)])

    # One author word cloud per category, all collected on a draggable page.
    # NOTE(review): the first display title is "社会书籍" rather than
    # catalog[0] ("社会小说") — preserved verbatim from the original.
    cloud_titles = ['社会书籍', '情感小说', '青春文学', '科幻经典', '战争军旅',
                    '玄幻精品', '恐怖惊悚', '悬疑推理', '世界名著', '外国文学', '武侠经典']
    page_Authors = Page(layout=Page.DraggablePageLayout)
    for cloud_title, ranking in zip(cloud_titles, author_rankings):
        cloud = WordCloud()
        cloud.add("", ranking)
        cloud.set_global_opts(title_opts=opts.TitleOpts(title="作者词云——" + cloud_title))
        page_Authors.add(cloud)
    page_Authors.render("WordCloud_authors.html")