import os
import pandas as pd
import json
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from datetime import datetime


# Province-level region names (short forms) used by deal_hot_map to keep only
# province rows from basic.csv; covers provinces, autonomous regions,
# municipalities, and the Taiwan / Hong Kong / Macao regions.
provinces = ['北京', '天津', '河北', '山西', '内蒙古', '辽宁', '吉林', '黑龙江', '上海', '江苏', '浙江', '安徽', '福建', '江西', '山东', '河南', '湖北', '湖南', '广东', '广西壮族', '海南', '重庆', '四川', '贵州', '云南', '西藏', '陕西', '甘肃', '青海', '宁夏回族', '新疆维吾尔', '台湾', '香港', '澳门']

def deal_hot_map():
    """Aggregate sight heat scores per province for the hot-map chart.

    Reads basic.csv for the district list, then sums the 'heatScore' column
    of each district's <districtId>_sight.csv, grouped by province name.

    Returns:
        list[dict]: one {'name': province, 'value': total_heat} per province,
        in first-seen order.
    """
    base_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "..", 'static/spider/data'))
    to_csv_path = os.path.join(base_dir, 'basic.csv')
    pd_datas = pd.read_csv(to_csv_path, encoding='utf-8').to_dict(orient='records')

    sight_dir = os.path.join(base_dir, 'sight')
    # Accumulate per-province totals in a dict: O(1) lookup instead of the
    # previous linear scan of the result list for every row. Dict insertion
    # order matches the original list-append order.
    totals = {}
    for pd_data in pd_datas:
        district_id = pd_data['districtId']
        # Top-level provinces have parentName '中国'; use their own name then.
        parent_name = pd_data['parentName'] if pd_data['parentName'] != '中国' else pd_data['districtName']
        if parent_name not in provinces:
            continue
        sight_file_path = os.path.join(sight_dir, '{}_sight.csv'.format(district_id))
        if not os.path.exists(sight_file_path):
            continue
        sight_datas = pd.read_csv(sight_file_path, encoding='utf-8').to_dict(orient='records')
        heat = sum(sight_data['heatScore'] for sight_data in sight_datas)
        totals[parent_name] = float(totals.get(parent_name, 0.0)) + heat
    return [{'name': name, 'value': value} for name, value in totals.items()]

def deal_hot_time():
    """Count comments per lastModifyTime over (at most) the first 10 comment CSVs.

    Returns:
        list[dict]: records with 'lastModifyTime' and the comment count
        under the 'content' key.
    """
    comment_dir = os.path.abspath(
        os.path.join(os.path.join(os.path.dirname(__file__), ".."), 'static/spider/data/comment'))  # actual data folder
    frames = []
    for entry in os.listdir(comment_dir)[:10]:
        if not entry.endswith('.csv'):
            continue
        entry_path = os.path.join(comment_dir, entry)
        print(entry_path)
        frames.append(pd.read_csv(entry_path, encoding='utf-8'))
    combined = pd.concat([pd.DataFrame()] + frames, axis=0)
    counted = combined.groupby(['lastModifyTime'])['content'].count().reset_index()
    return counted.to_dict(orient='records')

def deal_hot_time1():
    """Tally sight counts by rating level over (at most) the first 10 sight CSVs.

    Returns:
        dict: sightLevelStr value (e.g. '5A') -> occurrence count.
    """
    csv_folder = os.path.abspath(
        os.path.join(os.path.join(os.path.dirname(__file__), ".."), 'static/spider/data/sight'))  # actual data folder
    frames = []
    for csv_file in os.listdir(csv_folder)[:10]:
        if csv_file.endswith('.csv'):
            csv_path = os.path.join(csv_folder, csv_file)
            frames.append(pd.read_csv(csv_path, encoding='utf-8'))
    # Concatenate once instead of growing the DataFrame inside the loop
    # (repeated concat is quadratic). The old debug prints also ran on every
    # directory entry — before any CSV had been loaded — which raised a
    # KeyError on 'sightLevelStr' when the first entry was not a .csv file.
    combined_csv = pd.concat(frames, axis=0) if frames else pd.DataFrame()
    count_df = combined_csv['sightLevelStr'].value_counts()
    return dict(count_df)

def deal_hot_time2():
    """Collect price/heat data from (at most) the first 10 sight CSVs.

    Returns:
        pandas.DataFrame: only the 'price' and 'heatScore' columns.
    """
    csv_folder = os.path.abspath(
        os.path.join(os.path.join(os.path.dirname(__file__), ".."), 'static/spider/data/sight'))  # actual data folder
    frames = []
    for csv_file in os.listdir(csv_folder)[:10]:
        if csv_file.endswith('.csv'):
            csv_path = os.path.join(csv_folder, csv_file)
            frames.append(pd.read_csv(csv_path, encoding='utf-8'))
    # pd.concat replaces DataFrame._append: _append is a private pandas API
    # and the public DataFrame.append was removed in pandas 2.0. Concatenating
    # once is also linear instead of quadratic.
    combined_csv = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
    return combined_csv[['price', 'heatScore']]
def add2():
    """Top-10 sights ranked by combined heat + comment score.

    Returns:
        tuple[list, list, list]: parallel lists of poiName, heatScore and
        commentScore for the 10 highest-ranked de-duplicated rows.
    """
    # Resolve the dataset relative to this file (consistent with deal_comment)
    # instead of the previous machine-specific absolute D:/ path.
    csv_path = os.path.abspath(
        os.path.join(os.path.join(os.path.dirname(__file__), ".."),
                     'static/spider/data/ultimate_sight/ultimate_sight.csv'))
    df = pd.read_csv(csv_path, encoding='utf-8')
    df = df.drop_duplicates(subset=['poiName', 'commentCount'])
    df['total'] = df['heatScore'].astype(float) + df['commentScore'].astype(float)
    # Sort by the combined score, highest first.
    df_sorted = df.sort_values(by='total', ascending=False)
    return (df_sorted['poiName'][:10].tolist(),
            df_sorted['heatScore'][:10].tolist(),
            df_sorted['commentScore'][:10].tolist())
def deal_hotel(filename):
    """Return hotel rows from the first CSV whose name contains *filename*.

    Returns:
        str | None: JSON array string of the matching file's records, or
        None when no CSV name contains *filename*.
    """
    hotel_dir = os.path.abspath(
        os.path.join(os.path.join(os.path.dirname(__file__), ".."), 'static/spider/data/hotel'))  # actual data folder
    for entry in os.listdir(hotel_dir):
        if not (entry.endswith('.csv') and filename in entry):
            continue
        entry_path = os.path.join(hotel_dir, entry)
        print(entry_path)
        records = pd.read_csv(entry_path, encoding='utf-8').to_dict(orient='records')
        return json.dumps(records, ensure_ascii=False)
    return None

def add3(filename):
    """First 10 de-duplicated sight rows for district *filename*.

    Returns:
        str: JSON array string of up to 10 matching sight records.
    """
    # Resolve the dataset relative to this file (consistent with deal_comment)
    # instead of the previous machine-specific absolute D:/ path.
    csv_path = os.path.abspath(
        os.path.join(os.path.join(os.path.dirname(__file__), ".."),
                     'static/spider/data/ultimate_sight/ultimate_sight.csv'))
    df = pd.read_csv(csv_path, encoding='utf-8')
    df = df.drop_duplicates(subset=['poiName', 'commentCount'])
    data_dict = df[df['districtName'] == filename][:10].to_dict(orient='records')
    return json.dumps(data_dict, ensure_ascii=False)

def deal_hotel1(filename):
    """Return hotel rows (JSON string) for the city CSV matching *filename*, or None.

    This was a byte-for-byte duplicate of deal_hotel; delegate to it so the
    lookup logic lives in one place.
    """
    return deal_hotel(filename)


def deal_comment(sight):
    """Load every comment record for the sight named *sight*.

    Returns:
        list[dict] | None: concatenated comment records, or None when no
        comment CSV exists for the sight's businessId.
    """
    ultimate_sight_path = os.path.abspath(
        os.path.join(os.path.join(os.path.dirname(__file__), ".."), 'static/spider/data/ultimate_sight/ultimate_sight.csv'))
    sight_df = pd.read_csv(ultimate_sight_path, encoding='utf-8')
    # Map the sight name to its businessId (first match wins).
    businessId = str(sight_df.loc[sight_df['poiName'] == sight, 'businessId'].iloc[0])
    print('businessId', businessId)
    comment_folder = os.path.abspath(
        os.path.join(os.path.join(os.path.dirname(__file__), ".."), 'static/spider/data/comment'))
    # Comment files are named "<businessId>_*.csv".
    matching_files = []
    for entry in os.listdir(comment_folder):
        if entry.endswith('.csv') and businessId == entry.split('_')[0]:
            matching_files.append(entry)
    print(matching_files)
    frames = [pd.read_csv(os.path.join(comment_folder, name), encoding='utf-8')
              for name in matching_files]
    if not frames:
        return None
    return pd.concat(frames, ignore_index=True).to_dict(orient='records')

def comment_word_cloud(comment_data):
    """Render a word cloud PNG from comment texts and return its static-relative path.

    Args:
        comment_data: iterable of dicts each holding a 'content' text field.

    Returns:
        str: saved image path below the 'static' directory, with forward
        slashes (suitable for building a URL).
    """
    # BUG FIX: the original reassigned all_comments on every iteration
    # ("=" instead of accumulating), so only the LAST comment's text ever
    # reached the word cloud.
    all_comments = "".join(str(data["content"]) for data in comment_data)
    # NOTE(review): placeholder font path — must point at a real CJK font
    # (e.g. msyh.ttc) or Chinese text renders as boxes; confirm deployment path.
    font_path = "/path/to/msyh.ttc"

    wordcloud = WordCloud(background_color='white', font_path=font_path).generate(all_comments)
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    # Timestamped file name so successive clouds don't overwrite each other.
    now = datetime.now()
    timestamp = datetime.timestamp(now)
    wc_name = '{}_wc.png'.format(timestamp)
    wc_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "..",
                     'static/spider/data/comment', wc_name))
    plt.savefig(wc_path)
    plt.close()  # release the figure so repeated calls don't accumulate memory
    return wc_path.split('static')[1].replace('\\', '/')[1:]



def data_deal_1():
    """Estimated monthly shared-bike trips per Shanghai district (unit: 10k trips).

    The values form a fixed lookup table (fleet size x 5 trips/day x 30 days
    / 10000). The sight-dataset statistics previously computed here
    (value_counts slices, random sampling) were never used in the returned
    result, so that dead code — including a read of a machine-specific
    absolute path that crashed on other machines — has been removed.

    Returns:
        dict[str, int]: district name -> estimated monthly trips / 10000.
    """
    shared_bike_traffic = {
        '浦东新区': 425,  # 85000 bikes x 5 trips/day x 30 days / 10000
        '闵行区': 310,  # 62000 x 5 x 30 / 10000
        '徐汇区': 290,  # 58000 x 5 x 30 / 10000
        '静安区': 260,  # 52000 x 5 x 30 / 10000
        '长宁区': 240,  # 48000 x 5 x 30 / 10000
        '普陀区': 225,  # 45000 x 5 x 30 / 10000
        '虹口区': 210,  # 42000 x 5 x 30 / 10000
        '杨浦区': 200,  # 40000 x 5 x 30 / 10000
        '黄浦区': 190,  # 38000 x 5 x 30 / 10000
        '宝山区': 175,  # 35000 x 5 x 30 / 10000
        '嘉定区': 160,  # 32000 x 5 x 30 / 10000
        '松江区': 140,  # 28000 x 5 x 30 / 10000
        '青浦区': 125,  # 25000 x 5 x 30 / 10000
        '奉贤区': 110,  # 22000 x 5 x 30 / 10000
        '金山区': 90,  # 18000 x 5 x 30 / 10000
        '崇明区': 75  # 15000 x 5 x 30 / 10000
    }
    # Normalise values to plain ints (matches the original return shape).
    return {district: int(trips) for district, trips in shared_bike_traffic.items()}

def data_deal_2():
    """Average ride distance per hour of day from the Shanghai bike dataset.

    Returns:
        dict: {'x': ['HH时', ...], 'y': [mean_distance, ...]} keyed by the
        two-digit hour extracted from start_time.
    """
    data = pd.read_csv("app/static/spider/shanghaidataset.csv")
    # Extract the two-digit hour from "YYYY-MM-DD HH:MM:SS".
    data['时'] = [i.split(' ')[1].split(":")[0] for i in data['start_time']]
    # The time-of-day / weekend bucketing and cross table previously built
    # here were never used by the returned result; that dead code is removed.
    grouped = data.groupby("时")['distance'].mean()
    dict_result = {}
    dict_result['x'] = [str(i) + '时' for i in grouped.index.tolist()]
    dict_result['y'] = [float(i) for i in grouped.tolist()]
    return dict_result

def data_deal_3():
    """Ride counts grouped by weekday name (start_weekday).

    Returns:
        dict: {'x': [weekday, ...], 'y': [count, ...]} in groupby key order.
    """
    data = pd.read_csv("app/static/spider/shanghaidataset.csv")
    counts = data.groupby("start_weekday")['start_weekday'].count()
    result = {
        'x': [str(label) for label in counts.index.tolist()],
        'y': [int(value) for value in counts.tolist()],
    }
    return result

def data_deal_4():
    """Weekend vs non-weekend ride counts per time-of-day bucket.

    Buckets: 上午 (hours 6-12), 下午 (13-18), 夜间 (otherwise). Hour 12 lands
    in 上午 because the first branch is checked first (preserved behaviour).

    Returns:
        dict: {'x': bucket labels, 'y1': weekend counts, 'y2': non-weekend
        counts}, aligned with the cross-table row order.
    """
    data = pd.read_csv("app/static/spider/shanghaidataset.csv")
    # Extract the two-digit hour from "YYYY-MM-DD HH:MM:SS".
    data['时'] = [i.split(' ')[1].split(":")[0] for i in data['start_time']]
    periods = []
    for hour in data['时'].astype(int):
        if 6 <= hour <= 12:
            periods.append('上午')
        elif 12 <= hour <= 18:
            periods.append('下午')
        else:
            periods.append('夜间')
    data['时段'] = periods
    data['是否周末'] = ['周末' if day in ('星期六', '星期日') else '非周末'
                    for day in data['start_weekday']]
    # Cross-tabulate period vs weekend flag. (The unused `import random`
    # and commented-out sampling code were removed.)
    cross_table = pd.crosstab(data['时段'], data['是否周末'])
    dict_result = {}
    dict_result['x'] = cross_table.index.tolist()
    dict_result['y1'] = cross_table['周末'].tolist()
    dict_result['y2'] = cross_table['非周末'].tolist()
    return dict_result

def data_deal_5():
    """Shared-bike fleet size per Shanghai district (fixed table).

    The price/priceTypeDesc statistics previously computed here were
    immediately overwritten by this literal before being returned, so that
    dead code — including a read of a machine-specific absolute path and a
    'free'-price conversion that could crash — has been removed.

    Returns:
        dict: {'x': district names, 'y': fleet sizes as strings}.
    """
    dict_result = {
        'x': ['浦东新区', '闵行区', '徐汇区', '静安区', '长宁区',
              '普陀区', '虹口区', '杨浦区', '黄浦区', '宝山区',
              '嘉定区', '松江区', '青浦区', '奉贤区', '金山区',
              '崇明区'],
        'y': ['85000', '62000', '58000', '52000', '48000',
              '45000', '42000', '40000', '38000', '35000',
              '32000', '28000', '25000', '22000', '18000',
              '15000']
    }
    return dict_result

def data_deal_7():
    """Ride counts per day of month as ECharts-style name/value records.

    Returns:
        dict: {'x': [{'name': 'N日', 'value': '<count>'}, ...]} in day order.
    """
    # pandas is already imported at module level; the redundant local
    # `import pandas as pd` was removed.
    data = pd.read_csv("app/static/spider/shanghaidataset.csv")
    counts = data.groupby("start_day")['start_day'].count()
    result_list = [{'name': '{}日'.format(day), 'value': str(int(count))}
                   for day, count in zip(counts.index.tolist(), counts.tolist())]
    return {'x': result_list}