import re
from numpy import *
import pandas as ps
import json
# from homeData import getAllData
from .query import querys
import jieba
import re

from pymysql import *

# Shared MySQL connection and cursor used by every query in this module.
# NOTE(review): credentials are hard-coded here — consider moving them to a
# config file / environment variables. Also note this connects at import time.
conn = connect(host='localhost',user='root',password='123456',database='movie_result',port=3306)
cursor = conn.cursor()

def querys(sql, params, type='no_select'):
    """Run *sql* with *params* on the shared module-level connection.

    When ``type`` is anything other than ``'no_select'`` the query is treated
    as a SELECT and all fetched rows are returned; otherwise the statement is
    committed and a success message is returned.

    NOTE(review): this local definition shadows the ``querys`` imported from
    ``.query`` at the top of the file — presumably intentional, verify.
    """
    cursor.execute(sql, tuple(params))
    if type == 'no_select':
        conn.commit()
        return '数据库语句执行成功'
    rows = cursor.fetchall()  # fetch every matching row for SELECT queries
    conn.commit()
    return rows

def getAllData():
    """Fetch every row of ``movies_copy`` and normalize list-like columns.

    Comma-joined text columns are split into lists; the JSON ``comments``
    column is decoded. NULL columns fall back to a default value.

    Returns:
        list[list]: one normalized row per movie.

    NOTE(review): for NULL columns the default is a plain *string* ('无',
    '中国大陆', '汉语普通话') while non-NULL values become *lists* — kept
    as-is for backward compatibility, but downstream code iterating these
    fields will walk the string character by character.
    """
    def _split_or_default(value, default):
        # Comma-joined DB text -> list; fall back to *default* when NULL.
        return default if value is None else value.split(',')

    def map_fn(item):
        item = list(item)
        item[1] = _split_or_default(item[1], '无')          # directors
        item[4] = _split_or_default(item[4], '无')          # casts
        item[7] = item[7].split(',')                        # types
        item[8] = _split_or_default(item[8], '中国大陆')     # country
        item[9] = _split_or_default(item[9], '汉语普通话')   # lang
        item[13] = item[13].split(',')                      # starts
        item[16] = item[16].split(',')                      # imgList
        item[15] = json.loads(item[15])                     # summary/comments JSON
        return item

    allData = querys('select * from movies_copy', [], 'select')
    return [map_fn(row) for row in allData]

# Load all movies once at import time and build the module-level DataFrame
# that the aggregation helpers below (e.g. getwordcloud) read from.
item = getAllData()
df = ps.DataFrame(item, columns=[
    'id',
    'directors',
    'rate',
    'title',
    'casts',
    'cover',
    'year',
    'types',
    'country',
    'lang',
    'time',
    'moveiTime',   # NOTE(review): looks like a typo for 'movieTime' — kept as-is, other modules may reference it
    'comment_len',
    'starts',
    'summary',
    'comments',
    'imgList',
    'movieUrl',
    'detailLink'
])
# All rows of the comments table: (id, movie_name, comment).
all_comment = querys('select * from comments', [], 'select')
#Split the comments out per movie, tokenize each movie's comments, and build word-cloud data from them
def clear(name):
    """Build word-cloud data for the movie *name* from its comments.

    Tokenizes every comment with jieba, drops stopwords and single-character
    tokens, and counts occurrences of the remaining words.

    Args:
        name: movie title to match against the ``movie_name`` column.

    Returns:
        list[dict]: ``[{"name": word, "value": count}, ...]``.
    """
    item = ps.DataFrame(all_comment, columns=['id', 'movie_name', 'comment'])
    movie = item[item['movie_name'] == name]
    comments = movie['comment']
    comment_jieba_result = []
    for comment in comments:
        # Strip newlines/tabs/spaces/non-breaking spaces before tokenizing.
        comment = comment.replace('\n', '').replace('\t', '').replace(' ', '').replace('\xa0', '')
        comment_jieba_result.append(jieba.lcut(comment))
    with open('myutils/baidu_stopwords.txt', encoding='utf-8') as f:
        # One stopword per line; drop the trailing newline from each.
        stop_words = {line.replace("\n", "") for line in f}
    # BUG FIX: the original filtered only `comment_cut`, which held just the
    # LAST comment's tokens after the loop — every comment now contributes.
    comment_types = {}
    for words in comment_jieba_result:
        for word in words:
            if word not in stop_words and len(word) > 1:
                comment_types[word] = comment_types.get(word, 0) + 1
    data_comment = [{"name": word, "value": count} for word, count in comment_types.items()]
    print(data_comment)
    return data_comment




# all_comment = clear('兰心大剧院')

def getwordcloud():
    """Count movie types and languages across the module-level ``df``.

    Returns:
        tuple[list[dict], list[dict]]: ``(data_movie, data_language)``, each
        shaped as ``[{"name": ..., "value": count}, ...]`` for ECharts.
    """
    def _count(series):
        # Tally every entry of a column whose cells are lists of labels.
        counts = {}
        for entry in series:
            # BUG FIX: getAllData stores NULL defaults as plain strings
            # (e.g. '汉语普通话'); iterating a str would count individual
            # characters, so wrap a bare string into a one-element list.
            if isinstance(entry, str):
                entry = [entry]
            for detail in entry:
                counts[detail] = counts.get(detail, 0) + 1
        return counts

    movie_type = _count(df.loc[:, "types"])
    language_type = _count(df.loc[:, "lang"])
    data_movie = [{"name": word, "value": count} for word, count in movie_type.items()]
    data_language = [{"name": language, "value": count} for language, count in language_type.items()]
    return data_movie, data_language


# getwordcloud()