import pandas as pd
import requests
import xlrd
from lxml import etree  # 导入etree子模块
from bs4 import BeautifulSoup  # 导入BeautifulSoup库
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from sklearn.preprocessing import MinMaxScaler
from wordcloud import WordCloud
from PIL import Image
import jieba

"""
@ulr 待爬数据的网址
"""
# Pages to scrape: [0] player per-game statistics, [1] team rankings (Sina CBA section).
url = ['http://cba.sports.sina.com.cn/cba/stats/playerstats/',
       'http://cba.sports.sina.com.cn/cba/stats/teamrank/']

"""
requests请求获取response
"""


def getRes(num):
    """Fetch url[num] and return the response, decoded as GBK.

    The Sina CBA pages are GBK-encoded Chinese, so the encoding is set
    explicitly before callers read ``.text``.
    """
    resp = requests.get(url[num])
    resp.encoding = 'GBK'  # Chinese page encoding
    return resp


"""
获取状态码
"""


def getStatus(num):
    """Return the HTTP status code for url[num]."""
    response = getRes(num)
    return response.status_code


"""
获取网络地址
"""


def getUrl(num):
    """Return the final URL of the response for url[num] (after redirects)."""
    response = getRes(num)
    return response.url


"""
获取请求头
"""


def getHeaders(num):
    """Return the response headers for url[num]."""
    response = getRes(num)
    return response.headers


"""
获取cookie信息
"""


def getCookie(num):
    """Return the cookies set by the response for url[num]."""
    response = getRes(num)
    return response.cookies


"""
获取返回的text信息
"""


def getText(num):
    """Return the (GBK-decoded) body text of the response for url[num]."""
    response = getRes(num)
    return response.text


"""
爬取CBA21-22赛季球员个人技术统计，并存储为excel表
"""


def getData(num):
    """Scrape CBA 2021-22 individual player statistics into an Excel file.

    num -- index into the module-level ``url`` list (0 is the player-stats page).
    Writes ../dataend/player.xlsx; the sheet name is the page's table title.

    Fixes vs. the original: the rows were copied into a second list for no
    reason, and the header list was wrapped in another list (``columns=[theads]``),
    which made pandas build a needless one-level MultiIndex.
    """
    html_str = getText(num)
    html = etree.HTML(html_str)  # parse the HTML string
    # Table title from the <h2 class="blk_title title_line"> heading (first 10 chars).
    table_title = html.xpath('//h2[@class="blk_title title_line"]//span/text()')[0][0:10]
    # Header cells: plain <th> text plus <th><a> text.
    # NOTE(review): concatenating the two xpath result lists assumes their
    # combined order matches the table's column order — confirm on the live page.
    theads = html.xpath('//div[@class="blk_wrap blk01_wrap"]//table[@id="table01"]//thead//tr//th/text()') + html.xpath(
        '//div[@class="blk_wrap blk01_wrap"]//table[@id="table01"]//thead//tr//th//a/text()')
    tbody = html.xpath('//div[@class="blk_wrap blk01_wrap"]//table[@id="table01"]//tbody//tr')
    # One list of stripped cell texts per player row.
    rows = []
    for tr in tbody:
        fragment = etree.tostring(tr, encoding='utf-8').decode('utf-8')
        soup = BeautifulSoup(fragment, features="lxml")
        rows.append(list(soup.stripped_strings))
    # Align wide (CJK) characters when the frame is printed.
    pd.set_option('display.unicode.east_asian_width', True)
    df = pd.DataFrame(data=rows, columns=theads)
    df.to_excel("../dataend/player.xlsx", sheet_name=table_title[0:16])


"""
爬取CBA21-22赛季球队技术统计，并存储为excel表
"""


def getData2(num):
    """Scrape CBA 2021-22 team statistics into an Excel file.

    num -- index into the module-level ``url`` list (1 is the team-rank page).
    Writes ../dataend/team.xlsx; the sheet name is the page's table title.

    Fixes vs. the original: the rows were copied into a second list for no
    reason, and the header list was wrapped in another list (``columns=[theads]``),
    which made pandas build a needless one-level MultiIndex.
    """
    html_str = getText(num)
    html = etree.HTML(html_str)  # parse the HTML string
    # Table title from the <h2 class="blk_title title_line"> heading (first 10 chars).
    table_title = html.xpath('//h2[@class="blk_title title_line"]//span/text()')[0][0:10]
    # Header cells of the stats table.
    theads = html.xpath('//div[@class="blk_wrap"]//table//thead//tr//th/text()')
    tbody = html.xpath('//div[@class="blk_wrap"]//table//tbody//tr')
    # One list of stripped cell texts per team row.
    rows = []
    for tr in tbody:
        fragment = etree.tostring(tr, encoding='utf-8').decode('utf-8')
        soup = BeautifulSoup(fragment, features="lxml")
        rows.append(list(soup.stripped_strings))
    # Align wide (CJK) characters when the frame is printed.
    pd.set_option('display.unicode.east_asian_width', True)
    df = pd.DataFrame(data=rows, columns=theads)
    df.to_excel("../dataend/team.xlsx", sheet_name=table_title[0:16])


"""
数据读取
"""


def readExcel(datapath):
    """Load an Excel file into a DataFrame.

    Also enables wide-character (CJK) column alignment for later printing.
    """
    pd.set_option("display.unicode.east_asian_width", True)
    return pd.read_excel(datapath)


"""
@sentence 需要拆分为词云的句子
利用jieba包拆分词云
"""


def seg_sentence(sentence):
    """Segment a Chinese sentence with jieba and join the tokens with spaces.

    Tab tokens are dropped; the result has no leading/trailing whitespace.
    """
    tokens = jieba.cut(sentence.strip())
    return " ".join(tok for tok in tokens if tok != '\t').strip()


"""
生成词云
@inputname 输入文档名
@outputname 输出文档名
@photoname 词云图名
"""


def wordcloud(inputname, outputname, photoname):
    """Generate and display a word-cloud image from a text file.

    inputname  -- raw text file under ../dataend/wordcloud/input/
    outputname -- intermediate file (same dir) holding the segmented text
    photoname  -- output image file name under ../dataend/

    Fixes vs. the original: file handles are now managed by ``with`` blocks
    (the originals leaked on any exception), and the WordCloud instance no
    longer shadows this function's own name.
    """
    # Segment every input line and write the result to the intermediate file.
    with open("../dataend/wordcloud/input/" + inputname, "r", encoding="utf-8") as src, \
            open("../dataend/wordcloud/input/" + outputname, "w", encoding="utf-8") as dst:
        for line in src:
            dst.write(seg_sentence(line) + "\n")
    mask = np.array(Image.open("../dataend/wordcloud/input/mask.jpg"))  # shape image
    with open("../dataend/wordcloud/input/" + outputname, "r", encoding="utf-8") as seg_file:
        mytext = seg_file.read()
    cloud = WordCloud(mask=mask,
                      width=3000,
                      height=3000,
                      background_color="white",
                      margin=1,
                      max_words=300,
                      min_font_size=10,
                      max_font_size=None,
                      repeat=False,
                      font_path="../dataend/wordcloud/font/FZKaTong-M19S.ttf"
                      ).generate(mytext)
    cloud.to_file("../dataend/" + photoname)
    plt.figure(dpi=150)
    plt.imshow(cloud)
    plt.axis("off")
    plt.show()


"""
@KMean算法
球员综合能力聚类分析
"""


def KMean():
    """Cluster players by their stats (KMeans, k=5) and plot a t-SNE projection.

    Reads ../dataend/player.xlsx, min-max scales twelve stat columns, fits
    KMeans, embeds the raw stats in 2-D with t-SNE and saves a scatter plot
    colored per cluster.

    Bug fixed: ``KMeans.labels_`` are 0..n_clusters-1, but the original
    plotted labels 1..5 — cluster 0 was never drawn and the label-5 subset
    was always empty.  All five clusters (0-4) are plotted now.
    """
    playDataAll = readExcel("../dataend/player.xlsx")

    player_data = playDataAll[['场次', '得分', '篮板', '前篮板',
                               '后篮板', '助攻', '抢断', '扣篮', '盖帽',
                               '失误', '犯规', '效率']]
    player_list = np.array(player_data).tolist()

    plt.rcParams["font.sans-serif"] = ["SimHei"]
    scale = MinMaxScaler().fit(player_list)
    player_dataScale = scale.transform(player_list)
    kmeans = KMeans(n_clusters=5, random_state=123).fit(player_dataScale)
    tsne = TSNE(n_components=2, init='random', random_state=177).fit(player_list)
    df = pd.DataFrame(tsne.embedding_)
    df['labels'] = kmeans.labels_
    plt.figure(figsize=(10, 10))
    # One marker style per cluster; labels run 0..4.
    for label, style in enumerate(['bo', 'r*', 'gD', 'yd', 'mh']):
        cluster = df[df['labels'] == label]
        plt.plot(cluster[0], cluster[1], style)
    plt.savefig('../dataend/球员综合能力聚类分析图.png')
    plt.show()


"""
基于熵权的TOPSIS 对球员综合能力进行打分评价
"""

"""
@读取数据，由于球员较多 我们选择得分在15分以上的球员进行 熵权TOPSIS分析 单独进行数据处理
file文件路径
column 指标个数
"""


def readfile(filepath, column):
    """Read columns 1..column of the first sheet into a float matrix.

    The header row (row 0) and the first column (labels) are skipped.
    Returns a numpy array of shape (column, n_data_rows): one row per
    indicator, one column per sample.
    """
    wb = xlrd.open_workbook(filename=filepath)  # open the workbook
    sheet = wb.sheet_by_index(0)  # first worksheet
    nrows = sheet.nrows
    # Build the matrix column-by-column so indicators become rows.
    matrix = [[float(sheet.cell_value(r, c)) for r in range(1, nrows)]
              for c in range(1, column + 1)]
    return np.array(matrix)


"""
@计算指标权重
input 数据指向一致矩阵
column 指标个数
"""


def getweight(data0, column):
    """Entropy-weight method: one weight per indicator (row of ``data0``).

    data0  -- numpy array, shape (indicators, samples); MUTATED in place
              (rows are min-max scaled, then divided by their sums)
    column -- number of indicator rows to process
    Returns a 1-D array of weights summing to 1.
    """
    _, m = np.shape(data0)  # m = number of samples
    # Min-max scale each indicator row to [0, 1].
    row_max = np.max(data0, axis=1)
    row_min = np.min(data0, axis=1)
    for i in range(column):
        data0[i] = (data0[i] - row_min[i]) / (row_max[i] - row_min[i])
    # Convert each row to per-sample shares of that indicator.
    row_sum = np.sum(data0, axis=1)
    for i in range(column):
        data0[i] = data0[i] / row_sum[i]
    # Guard log(0): substitute a tiny positive value before taking logs.
    safe = data0 * 1.0
    safe[np.where(data0 == 0)] = 0.0001
    # Entropy of each indicator.
    e = (-1.0 / np.log(m)) * np.sum(data0 * np.log(safe), axis=1)
    # Weight = normalized information utility (1 - entropy).
    return (1 - e) / np.sum(1 - e)


"""
@极小型指标正向化
input 对应极小型指标的数据
"""


#  极小型指标 -> 极大型指标
def min_revolute_max(datas):
    maxium = np.max(datas)
    minium = np.min(datas)
    return (np.max(datas) - datas) / (maxium - minium)  # 套公式


# 中间型指标 -> 极大型指标
# Intermediate-type indicator (best at x_best) -> benefit indicator
def dataDirection_2(datas, x_best):
    """Map an intermediate indicator to benefit form: 1 - |x - best| / max|x - best|."""
    deviation = abs(datas - x_best)
    return 1 - deviation / np.max(deviation)


# 区间型指标 -> 极大型指标
# Interval-type indicator (ideal within [x_min, x_max]) -> benefit indicator
def dataDirection_3(datas, x_min, x_max):
    """Map an interval indicator to benefit form.

    Values inside [x_min, x_max] score 1; values outside are penalized
    linearly by their distance, normalized by the largest excursion M.
    """
    M = max(x_min - np.min(datas), np.max(datas) - x_max)
    scores = []
    for value in datas:
        if value < x_min:
            scores.append(1 - (x_min - value) / M)
        elif value > x_max:
            scores.append(1 - (value - x_max) / M)
        else:
            scores.append(1)
    return np.array(scores)


"""
@数据矩阵标准化
 datas正向化的数据矩阵
"""


# 数据矩阵标准化
def normnums(datas):
    K = np.power(np.sum(pow(datas, 2), axis=1), 0.5)
    for i in range(0, K.size):
        for j in range(0, datas[i].size):
            datas[i, j] = datas[i, j] / K[i]  # 套用矩阵标准化的公式
    return datas


"""
@指标得分计算

answer 标准化的数据矩阵
w 权重矩阵
column 指标列数
"""


def getscore(answer, w, column):
    """TOPSIS closeness score for every sample.

    answer -- standardized matrix, shape (column, samples): rows are
              indicators, columns are evaluated objects
    w      -- indicator weights
    column -- number of indicator rows
    Returns Si = Di- / (Di+ + Di-) per sample, where Di+ / Di- are the
    weighted distances to the per-indicator max / min.
    """
    ideal_best = np.array([np.max(row[:]) for row in answer])
    ideal_worst = np.array([np.min(row[:]) for row in answer])
    scores = []
    for k in range(np.size(answer, axis=1)):  # every sample (column)
        d_plus = 0
        d_minus = 0
        for q in range(column):  # accumulate over all indicators
            d_plus += w[q] * np.power(answer[q, k] - ideal_best[q], 2)
            d_minus += w[q] * np.power(answer[q, k] - ideal_worst[q], 2)
        d_plus = pow(d_plus, 0.5)
        d_minus = pow(d_minus, 0.5)
        scores.append(d_minus / (d_minus + d_plus))  # Si = Di- / (Di+ + Di-)
    return np.array(scores)


""""
计算球员TOPSIS的综合得分
@ datapath 原始数据路径
@ outpath 输出结果路径
"""


def outscore(datapath, outpath):
    """Compute entropy-weight TOPSIS scores per player and write them to Excel.

    datapath -- input Excel with a '球员' (player) column plus 15 indicators
    outpath  -- output Excel path for the score sheet

    Fixes vs. the original: ``ExcelWriter.save()`` was removed in pandas 2.0
    (the writer is now used as a context manager), ``sheet_name`` is passed by
    keyword (positional use is deprecated), and the no-op ``else`` branch that
    re-assigned each row to itself is gone.
    """
    playernames = readExcel(datapath)['球员']
    answer1 = readfile(datapath, 15)  # 15 = number of indicators
    answer2 = answer1.copy()  # untouched copy for the weight computation
    # Indicators 13-14 are cost-type; convert them to benefit form.
    # The remaining indicators are already benefit-type and stay as-is.
    for i in range(13, 15):
        answer1[i] = min_revolute_max(answer1[i])
    answer3 = normnums(answer1)  # standardize the positively-oriented matrix
    w = getweight(answer2, 15)  # entropy weights
    answer4 = getscore(answer3, w, 15)  # TOPSIS closeness scores
    data = pd.DataFrame(data=answer4, index=playernames)
    # Write the scores; the context manager saves and closes the file.
    with pd.ExcelWriter(outpath) as writer:
        data.to_excel(writer, sheet_name='得分', float_format='%.5f')


"""
TOPSIS得分前十球员条形图
@ picname
"""


def getPlayPic1(picname):
    """Bar chart of the ten players with the highest TOPSIS scores.

    Reads ../dataend/playerSorce.xlsx and saves the figure as
    ../dataend/<picname>.png.
    """
    scored = readExcel("../dataend/playerSorce.xlsx")
    # Sort descending on the composite score, keep the top ten.
    scored = scored.sort_values(by="TOPSIS得分", ascending=False)
    names = list(scored["球员"][0:10])
    points = list(scored["TOPSIS得分"][0:10])
    sns.set_style("darkgrid")
    ax = sns.barplot(x=names, y=points)

    plt.xlabel("CBA2021-2022赛季球员综合得分")
    plt.ylabel("TOPSIS综合得分")
    plt.rcParams["font.sans-serif"] = ["SimHei"]
    plt.figure(figsize=(10, 10))
    ax.get_figure().savefig("../dataend/" + picname + ".png", dpi=400)
    plt.show()


"""
TOPSIS得分前十折线图
@ picname 图片名字
"""


def getPlayPic2(picname):
    """Line chart of the ten players with the highest TOPSIS scores.

    Reads ../dataend/playerSorce.xlsx and saves the figure as
    ../dataend/<picname>.png.
    """
    scored = readExcel("../dataend/playerSorce.xlsx")
    # Sort descending on the composite score, keep the top ten.
    scored = scored.sort_values(by="TOPSIS得分", ascending=False)
    names = list(scored["球员"][0:10])
    points = list(scored["TOPSIS得分"][0:10])
    sns.set_style("darkgrid")
    ax = sns.lineplot(x=names, y=points)
    plt.rcParams["font.sans-serif"] = ["SimHei"]
    plt.figure(figsize=(10, 10))
    ax.get_figure().savefig("../dataend/" + picname + ".png", dpi=400)
    plt.show()


"""
球员雷达图
"""


def getradar():
    """Draw a radar chart of one player's per-indicator scores (0-100 scale).

    The indicator names and score values are hard-coded for a single player.
    Saves the figure to ../dataend/radarpic.png.

    Fix vs. the original: the polar angles were computed twice with identical
    ``np.linspace`` calls; they are computed once now and the closed copy is
    derived from it.
    """
    labels = ['场次', '得分', '二分', '三分',
              '罚球', '篮板', '前篮板', '后篮板',
              '助攻', '抢断', '扣篮', '盖帽', '效率',
              '失误', '犯规'
              ]
    scores = [38, 100, 100, 84,
              100, 26, 21,
              27, 100, 33,
              3, 0, 94,
              25, 60
              ]
    datalength = len(scores)
    angles0 = np.linspace(0, 2 * np.pi, datalength, endpoint=False)  # evenly spaced polar angles
    # Close the polygon: append the first point to the end of both series.
    angles = np.append(angles0, angles0[0])
    scores.append(scores[0])
    plt.rcParams["font.sans-serif"] = ["SimHei"]
    plt.polar(angles, scores, 'b', lw=2)
    # NOTE(review): the font name has a leading space (' simhei') — confirm it
    # actually resolves; rcParams above already selects SimHei as a fallback.
    plt.thetagrids(angles0 * 180 / np.pi, labels, fontproperties=' simhei')
    plt.fill(angles, scores, facecolor='b', alpha=0.4)
    plt.ylim(0, 100)
    plt.savefig("../dataend/radarpic.png")
    plt.show()


"""
球队胜率饼图
"""


def getpie():
    """Pie chart of the win rates of the top-five teams.

    Win rates are normalized to sum to 1 so the slices fill the pie.
    Saves the figure to ../dataend/winpie.png.
    """
    teams = readExcel("../dataend/team.xlsx")
    names = teams["球队"][0:5]
    rates = np.array(teams["胜率"][0:5])
    shares = rates / rates.sum()  # normalize so the slices total 100%
    plt.rcParams["font.sans-serif"] = ["SimHei"]
    plt.figure(figsize=(5, 5))
    offsets = [0.1, 0.01, 0.01, 0.01, 0.01]  # pull the first slice out slightly
    plt.pie(shares, explode=offsets, labels=names, autopct="%1.1f%%")
    plt.title("CBA前五球队胜率饼图")
    plt.savefig("../dataend/winpie.png")
    plt.show()


"""
球队胜负场箱线图
"""


def getbplot():
    """Box plot comparing win and loss counts across all teams.

    Saves the figure to ../dataend/timebplot.png.
    """
    teams = readExcel("../dataend/team.xlsx")
    plt.rcParams["font.sans-serif"] = ["SimHei"]
    wins = np.array(teams["胜"])
    losses = np.array(teams["负"])
    plt.boxplot([wins, losses], labels=['胜', '负'])
    plt.title("CBA球队胜负场箱线图")
    plt.savefig("../dataend/timebplot.png")
    plt.show()


"""
主程序
"""


def main():
    """Run the whole pipeline: request diagnostics, data display, charts,
    word clouds, clustering and entropy-weight TOPSIS scoring.

    Expects the scraped Excel files and word-cloud inputs to exist under
    ../dataend/.
    """
    status = [getStatus(0), getStatus(1)]
    print("待爬网址的响应状态码:")
    print(status)

    headers = [getHeaders(0), getHeaders(1)]
    print("待爬网址的请求头信息:")
    print(headers)

    cookie = [getCookie(0), getCookie(1)]
    print("待爬网址的cookie信息:")
    print(cookie)

    # Display previously scraped player data.
    players = readExcel("../dataend/player.xlsx")
    print("球员数据展示:")
    print(players)

    # Display previously scraped team data.
    teams = readExcel("../dataend/team.xlsx")
    print("球队数据展示:")
    print(teams)

    # Word clouds: CBA introduction and entropy-weight TOPSIS description.
    wordcloud("input.txt", "output.txt", "CBACloud.jpg")
    wordcloud("input2.txt", "output2.txt", "shangquanTOPSIS.jpg")

    # Player clustering chart.
    KMean()

    # Entropy-weight TOPSIS scores for players.
    outscore("../dataend/TOPSISPlayer.xlsx", "../dataend/playerSorce.xlsx")

    # Bar and line charts of the top-ten TOPSIS scores.
    getPlayPic1("TOPSIS得分前十条形图图")
    getPlayPic2("TOPSIS得分前十折线图")

    # Player radar chart.
    getradar()

    # Team win-rate pie chart and win/loss box plot.
    getpie()
    getbplot()


# Guard so that importing this module does not fire the whole pipeline.
if __name__ == "__main__":
    main()
