from pyproj import Transformer

from app import db

from . import home
from flask import render_template, redirect, url_for, flash, session, request, jsonify
from app.models import *
from sqlalchemy import or_, and_
from functools import wraps
from decimal import *
from werkzeug.security import generate_password_hash, check_password_hash
import pandas as pd
import os
import time
from datetime import datetime, date
import random
import numpy as np
import copy
import json
from .spiders import *
from app.home.algorithms import *
# from .display import *
from .deal_data import *

def home_login(f):
    """
    Login-required decorator: redirect anonymous visitors to the login page,
    carrying the originally requested URL in the ``next`` query parameter.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if "user" in session:
            return f(*args, **kwargs)
        print('session', session)
        return redirect(url_for("home.login", next=request.url))
    return wrapper


@home.route('/', methods=["GET","POST"])
@home_login
def index():
    """Render the home page; GET and POST produce the identical response."""
    # Both branches of the original returned the same template, so the
    # method check is unnecessary.
    return render_template('home/index.html')


@home.route('/login', methods=["GET", "POST"])
def login():
    """
    Log a user (or admin) in.

    GET:  show the login form, or go straight to the index when a session
          already exists.
    POST: check the submitted credentials against User first, then Admin.
          On success the username and id are stored in the session.

    Bug fix: every code path now returns a response. The original could
    fall off the end (empty form, or username matching neither User nor
    Admin) and return None, which Flask turns into a 500 error.
    """
    if request.method == "GET":
        if 'user' in session:
            return redirect(url_for("home.index"))
        return render_template('home/login.html')

    post_data = request.form
    print('[post_data]', post_data)
    if post_data:
        user = User.query.filter_by(username=post_data['username']).first()
        if user:
            if user.check_password(post_data['password']):
                db.session.commit()
                session["user"] = post_data["username"]  # remember login
                session["user_id"] = user.id
            # NOTE(review): a wrong password still redirects to the index;
            # the @home_login decorator then bounces back here. Behavior
            # kept as-is to avoid changing the login flow.
            return redirect(url_for('home.index'))
        print('usernmae', post_data['username'])
        admin = Admin.query.filter_by(manager=post_data['username']).first()
        if admin:
            if admin.check_password(post_data['password']):
                db.session.commit()
                session["user"] = admin.manager
                session["user_id"] = admin.id
            return redirect(url_for('home.index'))
    # Unknown account or empty form: re-render the login page instead of
    # implicitly returning None.
    return render_template('home/login.html')


@home.route('/register', methods=["GET", "POST"])
def register():
    """Show the registration form (GET) or create a new account (POST)."""
    if request.method == "GET":
        return render_template('home/register.html')
    if request.method == "POST":
        from_web_data = request.form
        existing = User.query.filter_by(username=from_web_data['username']).first()
        if existing:
            # Username already taken: send the visitor to the login page.
            print('has user', existing)
            return redirect(url_for('home.login'))
        # NOTE: 'confired_password' is the field name used by the form.
        if from_web_data['password'] != from_web_data['confired_password']:
            return redirect(url_for('home.register'))
        new_user = User(
            password=generate_password_hash(from_web_data['password']),
            username=from_web_data['username'],
        )
        db.session.add(new_user)
        db.session.commit()
        return redirect(url_for('home.login'))


@home.route('/logout/')
@home_login
def logout():
    """
    Log the current user out: mark them offline in the database, clear
    the session keys and return to the login page.
    """
    User.query.filter_by(id=session['user_id']).update(
        {"status": 0}
    )
    db.session.commit()
    for key in ("user", "user_id", "avatar"):
        session.pop(key, None)
    return redirect(url_for("home.login"))


@home.route('/sight_map')
@home_login
def sight_map2():
    """Render the sight map page."""
    return render_template('home/sight_map2.html')


@home.route('/dis_hot_map')
@home_login
def dis_hot_map():
    """Render the district hot-map page (data source currently stubbed out)."""
    # The real loader is disabled; an empty stub is used instead.
    # to_e_datas = deal_hot_map()
    to_e_datas={}
    print(to_e_datas)
    # With the empty dict this loop never executes.  NOTE(review): if
    # to_e_datas becomes a dict again, iterating it yields keys, and
    # indexing a key with ['value'] would fail - the loop looks written
    # for a list of dicts; confirm the intended container type.
    for to_e_data in to_e_datas:
        to_e_data['value'] = int(to_e_data['value'])
    return render_template('home/dis_hot_map.html', to_e_data=to_e_datas)
from flask import jsonify
import json


@home.route('/dis_hot_map1')
@home_login
def dis_hot_map1():
    """Return the per-group life-circle score points as JSON for the map."""
    processed_data = []
    for row in deal_hot_map1():
        # Coerce the numeric columns to float; keep group/category as-is.
        record = {'group': row['group']}
        for field in ('X', 'Y', '商业', '养老', '教育', '休闲', '医疗', '综合得'):
            record[field] = float(row[field])
        record['分类'] = row['分类']
        processed_data.append(record)
    return jsonify(processed_data)


import pandas as pd


def is_point_in_polygon(point, polygon):
    """
    Ray-casting point-in-polygon test.

    Parameters:
    - point: (x, y) pair
    - polygon: sequence of (x, y) vertices; the edge from the last vertex
      back to the first is implied.

    Returns True when the point lies inside the polygon.
    """
    px, py = point
    vertex_count = len(polygon)
    inside = False

    ax, ay = polygon[0]
    for step in range(1, vertex_count + 1):
        bx, by = polygon[step % vertex_count]
        # Count crossings of a horizontal ray cast to the right of the point.
        if min(ay, by) < py <= max(ay, by) and px <= max(ax, bx):
            if ay != by:
                crossing_x = (py - ay) * (bx - ax) / (by - ay) + ax
            if ax == bx or px <= crossing_x:
                inside = not inside
        ax, ay = bx, by

    return inside


def is_polygon_closed(polygon):
    """Return True when *polygon* has at least 3 vertices and its first and
    last vertices coincide."""
    has_enough_points = len(polygon) >= 3
    return has_enough_points and polygon[0] == polygon[-1]


def get_boundary_indices(df, lon_col='X', lat_col='Y'):
    """
    Split the rows of *df* into polygon-boundary points and interior points.

    Parameters:
    - df: DataFrame containing the coordinate columns
    - lon_col / lat_col: names of the longitude/latitude columns

    Returns a pair of positional index lists:
    - boundary_indices: rows judged to lie on the polygon boundary
    - inner_indices: rows judged to lie strictly inside the polygon
    """
    raw_points = list(zip(df[lon_col], df[lat_col]))

    # Drop consecutive duplicate points, remembering each survivor's
    # original row position.
    deduped = []
    row_positions = []
    for pos, pt in enumerate(raw_points):
        if pos == 0 or pt != raw_points[pos - 1]:
            deduped.append(pt)
            row_positions.append(pos)

    # Fewer than three distinct points cannot form a polygon: treat every
    # row as boundary.
    if len(deduped) < 3:
        return list(range(len(df))), []

    closed = is_polygon_closed(deduped)
    ring = deduped if closed else deduped + [deduped[0]]

    boundary_indices = []
    inner_indices = []
    endpoints = [deduped[0], deduped[-1]]
    for pos, pt in zip(row_positions, deduped):
        # The first and last vertices always count as boundary.
        if pt in endpoints:
            boundary_indices.append(pos)
        elif is_point_in_polygon(pt, ring):
            inner_indices.append(pos)
        else:
            boundary_indices.append(pos)

    # An open polygon gets explicitly closed by repeating its first index.
    if not closed and boundary_indices:
        boundary_indices.append(boundary_indices[0])

    return boundary_indices, inner_indices


# Example: split a DataFrame into boundary/interior rows while keeping
# every other column intact.
def process_dataframe_with_other_fields(df, lon_col='X', lat_col='Y'):
    """
    Split *df* into boundary-point rows and interior-point rows.

    Parameters:
    - df: DataFrame holding longitude/latitude data
    - lon_col: longitude column name
    - lat_col: latitude column name

    Returns:
    - boundary_df: boundary rows only (all columns kept, index reset)
    - inner_df: interior rows, or an empty DataFrame when there are none
    """
    boundary_idx, inner_idx = get_boundary_indices(df, lon_col, lat_col)

    boundary_df = df.iloc[boundary_idx].reset_index(drop=True)
    if inner_idx:
        inner_df = df.iloc[inner_idx].reset_index(drop=True)
    else:
        inner_df = pd.DataFrame()

    return boundary_df, inner_df
# Data loader for the hot map: reads the precomputed score/coordinate CSV.
def deal_hot_map1():
    """Load the scored coordinate CSV and return it as a list of records."""
    import pandas as pd
    frame = pd.read_csv("app/static/spider/新坐标.csv", encoding='gbk')
    # Small constant offsets to line the points up with the base map.
    frame['Y'] = frame['Y'] - 0.0030
    frame['X'] = frame['X'] + 0.0015
    wanted = ['group', 'X', 'Y', '商业', '养老', '教育', '休闲', '医疗', '综合得', '分类']
    return frame[wanted].to_dict('records')

# @home.route('/dis_hot_recommend')
# @home_login
# def dis_hot_recommend():
#     import pandas as pd
#     # data = pd.read_csv("shanghaidataset.csv")
#     # data['时'] = [i.split(' ')[1].split(":")[0] for i in data['start_time']]
#     # data['年月日'] = [i.split(' ')[0] for i in data['start_time']]
#     # data['年月日时'] = data['年月日'] + '-' + data['时']
#     # # 转换为 datetime 格式
#     # data["年月日时"] = pd.to_datetime(data["年月日时"], format="%Y-%m-%d-%H")
#     # # 按时间升序排序（默认）
#     # data = data.sort_values('年月日时')
#     # data
#     # df = pd.DataFrame(data.groupby('年月日时')['年月日时'].count())
#     # df['年月日时'].tolist()
#     # import numpy as np
#     # import pandas as pd
#     # import matplotlib.pyplot as plt
#     # from sklearn.preprocessing import MinMaxScaler
#     # from keras.models import Sequential
#     # from keras.layers import LSTM, Dense, Dropout
#     # from sklearn.metrics import mean_squared_error, mean_absolute_error
#     # df = pd.DataFrame(data.groupby('年月日时')['年月日时'].count())
#     # df['value'] = df['年月日时'].interpolate()  # 线性插值填充缺失值
#     # # 数据归一化
#     # scaler = MinMaxScaler(feature_range=(0, 1))
#     # scaled_data = scaler.fit_transform(df[['value']])
#     #
#     # # 创建时间序列数据集
#     # def create_dataset(data, look_back=72):  # 使用3天历史数据预测
#     #     X, y = [], []
#     #     for i in range(len(data) - look_back):
#     #         X.append(data[i:(i + look_back), 0])
#     #         y.append(data[i + look_back, 0])
#     #     return np.array(X), np.array(y)
#     #
#     # look_back = 72  # 72小时（3天）时间窗口
#     # X, y = create_dataset(scaled_data, look_back)
#     #
#     # # 划分训练集和测试集（最后7天作为测试）
#     # test_size = 7 * 24
#     # X_train, X_test = X[:-test_size], X[-test_size:]
#     # y_train, y_test = y[:-test_size], y[-test_size:]
#     #
#     # # 调整输入格式 [样本数, 时间步长, 特征数]
#     # X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
#     # X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
#     # model = Sequential([
#     #     LSTM(100, return_sequences=True, input_shape=(look_back, 1)),
#     #     Dropout(0.2),
#     #     LSTM(100, return_sequences=True),
#     #     Dropout(0.2),
#     #     LSTM(50),
#     #     Dropout(0.2),
#     #     Dense(1)
#     # ])
#     # model.compile(optimizer='adam', loss='mse')
#     #
#     # # 添加早停和模型检查点
#     # from keras.callbacks import EarlyStopping, ModelCheckpoint
#     # callbacks = [
#     #     EarlyStopping(patience=10, restore_best_weights=True),
#     #     ModelCheckpoint('best_model.h5', save_best_only=True)
#     # ]
#     #
#     # history = model.fit(X_train, y_train,
#     #                     epochs=100,
#     #                     batch_size=32,
#     #                     validation_split=0.1,
#     #                     callbacks=callbacks,
#     #                     shuffle=False)
#     # # 加载最佳模型
#     # model.load_weights('best_model.h5')
#     #
#     # # 进行预测
#     # train_predict = model.predict(X_train)
#     # test_predict = model.predict(X_test)
#     #
#     # # 反归一化
#     # train_predict = scaler.inverse_transform(train_predict)
#     # y_train_actual = scaler.inverse_transform(y_train.reshape(-1, 1))
#     # test_predict = scaler.inverse_transform(test_predict)
#     # y_test_actual = scaler.inverse_transform(y_test.reshape(-1, 1))
#     #
#     # # 计算评估指标
#     # def evaluate_predictions(actual, predicted):
#     #     mse = mean_squared_error(actual, predicted)
#     #     mae = mean_absolute_error(actual, predicted)
#     #     rmse = np.sqrt(mse)
#     #     print(f'MSE: {mse:.2f}')
#     #     print(f'MAE: {mae:.2f}')
#     #     print(f'RMSE: {rmse:.2f}')
#     #     return mse, mae, rmse
#     #
#     # print("Train Evaluation:")
#     # train_metrics = evaluate_predictions(y_train_actual, train_predict)
#     # print("\nTest Evaluation:")
#     # test_metrics = evaluate_predictions(y_test_actual, test_predict)
#     #
#     # # 准备完整时间序列的预测数据
#     # def prepare_plot_data(actual, train_pred, test_pred, look_back):
#     #     train_pred_plot = np.empty_like(actual)
#     #     train_pred_plot[:, :] = np.nan
#     #     train_pred_plot[look_back:look_back + len(train_pred), :] = train_pred
#     #
#     #     test_pred_plot = np.empty_like(actual)
#     #     test_pred_plot[:, :] = np.nan
#     #     test_pred_plot[look_back + len(train_pred):look_back + len(train_pred) + len(test_pred), :] = test_pred
#     #
#     #     return train_pred_plot, test_pred_plot
#     #
#     # train_plot, test_plot = prepare_plot_data(
#     #     scaler.inverse_transform(scaled_data),
#     #     train_predict,
#     #     test_predict,
#     #     look_back
#     # )
#     # # 绘制测试集详细对比
#     # test_dates = df.index[-len(y_test_actual):]
#     # plt.figure(figsize=(18, 6))
#     # plt.plot(test_dates, y_test_actual, label='Actual', marker='o')
#     # plt.plot(test_dates, test_predict, label='Predicted', marker='x')
#     # plt.fill_between(test_dates,
#     #                  y_test_actual.flatten() - test_metrics[2],
#     #                  y_test_actual.flatten() + test_metrics[2],
#     #                  alpha=0.2, color='gray', label='± RMSE')
#     # plt.title('Test Set: Actual vs Predicted with Confidence Interval')
#     # plt.xlabel('Timestamp')
#     # plt.ylabel('Value')
#     # plt.legend()
#     # plt.grid(True)
#     # plt.xticks(rotation=45)
#     # plt.tight_layout()
#     # plt.show()
#     # data_LSTM = pd.DataFrame()
#     # data_LSTM['x'] = test_dates
#     # data_LSTM['y'] = y_test_actual
#     # data_LSTM['pred'] = test_predict
#     # data_LSTM.to_excel("LSTM_result.xlsx")
#     data = pd.read_excel("app/static/spider/LSTM_result.xlsx")
#     to_se_data={}
#     to_se_data['xdata'] =[ i for i in range(len(data))]
#     to_se_data['ydata'] = data['y'].tolist()
#     to_se_data['y2data'] = data['pred'].tolist()
#     return render_template('home/dis_hot_recommend.html', to_se_data=to_se_data)





@home.route('/dis_hot_recommend')
@home_login
def dis_hot_recommend():
    """Paginated, searchable table of per-group life-circle scores."""
    data = pd.read_csv("app/static/spider/deal_得分坐标.csv")
    data = data.drop_duplicates(['group'], keep='last')

    # Pagination settings: 10 rows per page.
    page = request.args.get('page', 1, type=int)
    per_page = 10

    # Optional case-insensitive search across every column.
    search_term = request.args.get('search_term', '').lower()
    if search_term:
        hits = data.astype(str).apply(
            lambda col: col.str.lower().str.contains(search_term)
        ).any(axis=1)
        filtered_data = data[hits]
    else:
        filtered_data = data

    # Slice out the requested page and compute the page count.
    start = (page - 1) * per_page
    paginated_data = filtered_data.iloc[start:start + per_page]
    total_rows = len(filtered_data)
    total_pages = total_rows // per_page + (1 if total_rows % per_page else 0)

    return render_template('home/dis_hot_recommend.html',
                           table_data=paginated_data.to_dict('records'),
                           columns=data.columns.tolist(),
                           current_page=page,
                           total_pages=total_pages,
                           search_term=search_term)


@home.route('/update_lstm_data', methods=['POST'])
@home_login
def update_lstm_data():
    """
    Persist table edits posted by the front end.

    Expects a JSON array of row objects; writes them back to the CSV file
    that dis_hot_recommend() reads.

    Returns a JSON status payload; exceptions are reported in the payload
    rather than raised.

    Bug fix: the original called df.to_excel(...) on a '.csv' path, which
    writes an Excel binary that pd.read_csv() in dis_hot_recommend() can
    no longer parse. The file must be written with to_csv().
    """
    try:
        updated_data = request.get_json()
        df = pd.DataFrame(updated_data)
        df.to_csv("app/static/spider/deal_得分坐标.csv", index=False)
        return jsonify({'status': 'success', 'message': '数据保存成功!'})
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)})

@home.route('/dis_time_hot')
@home_login
def dis_time_hot():
    """Render the time-based popularity charts from two aggregated sources."""
    # First series: list of {lastModifyTime, content} records.
    to_se_data = {'xdata': [], 'ydata': [], 'pix_data': []}
    for hot_data in deal_hot_time():
        stamp = hot_data['lastModifyTime']
        count = hot_data['content']
        to_se_data['xdata'].append(stamp)
        to_se_data['ydata'].append(count)
        to_se_data['pix_data'].append({'name': stamp, 'value': count})

    # Second series: plain {name: value} mapping.
    to_se_data1 = {'xdata': [], 'ydata': [], 'pix_data': []}
    for name, value in deal_hot_time1().items():
        to_se_data1['xdata'].append(name)
        to_se_data1['ydata'].append(value)
        to_se_data1['pix_data'].append({'name': name, 'value': value})

    print("查看一下数据！！！")
    print(to_se_data1)
    return render_template('home/dis_time_hot.html',
                           to_se_data=to_se_data, to_se_data1=to_se_data1)

@home.route('/hot_hotel_recommend1', methods=['GET', "POST"])
@home_login
def hot_hotel_recommend1():
    """
    GET:  show the tool-introduction page.
    POST: look up hotels for the submitted keyword and render them with a
          price-range distribution for the chart.

    Improvements: identity test (`is None`) instead of `!= None`, the JSON
    payload is parsed once instead of three times, and chained comparisons
    replace the `and`-joined range checks.
    """
    if request.method == "GET":
        tools = [
            {
                'title': '城市生活圈识别工具说明',
                'description': '本工具通过深度挖掘手机信令等动态轨迹数据，突破传统静态空间划分的局限，实现生活圈的精准识别与动态刻画。本工具框架流程图如下',
                'description2': '工具采用GCN联合CENSA算法为核心技术框架，通过对海量人口流动特征的智能聚类分析，构建了科学可靠的生活圈识别模型。本工具的使用介绍如下',
                'image_url': url_for('static', filename='images/图片1.png'),
                'video_url': url_for('static', filename='vedios/20250507_105135.mp4'),
                'contact': '技术支持：tech@example.com | 联系电话：400-123-4567',
                'filename': 'data_analysis_tools.zip',
                'filepath': 'static/downloads/data_analysis_tools.zip'
            },
        ]
        return render_template('home/hot_hotel_recommend.html', tools=tools)

    from_web_data = request.form
    print(from_web_data['keyword'])
    hotel_data = deal_hotel(from_web_data['keyword'])
    if hotel_data is None:
        error = '暂无数据'
        return render_template('home/hot_hotel_recommend.html', error=error)

    hotels = json.loads(hotel_data)
    print(hotels)
    # Bucket hotels into price bands for the pie chart.
    price_ranges = {
        '100-300': 0,
        '300-500': 0,
        '500-800': 0,
        '>800': 0
    }
    for hotel in hotels:
        price = hotel['price']
        if 100 <= price < 300:
            price_ranges['100-300'] += 1
        elif 300 <= price < 500:
            price_ranges['300-500'] += 1
        elif 500 <= price < 800:
            price_ranges['500-800'] += 1
        elif price >= 800:
            price_ranges['>800'] += 1
    to_e_data = [{'name': key, 'value': value}
                 for key, value in price_ranges.items()]
    return render_template('home/hot_hotel_recommend.html', code=200,
                           hotel_data=hotels, to_e_data=to_e_data)
from flask import send_file
import os





@home.route('/download/<filename>')
@home_login
def download_file(filename):
    """
    Serve a file from static/downloads as an attachment.

    Security fix: the URL-supplied name is reduced to its basename before
    being joined into the path, so traversal components ('..', or
    backslashes on Windows) cannot escape the downloads directory. The
    original also had a stray absolute-path string as its docstring.
    """
    safe_name = os.path.basename(filename)
    filepath = os.path.join('static', 'downloads', safe_name)
    print(filepath)
    if not os.path.exists(filepath):
        return "文件不存在", 404

    return send_file(filepath, as_attachment=True)

# @home.route('/hot_sight_back', methods=['GET', "POST"])
# @home_login
# def hot_sight_back():
#     if request.method == "GET":
#         return render_template('home/hot_sight_back.html')
#     else:
#         from_web_data = request.form
#         print(from_web_data['keyword'])
#         comment_data = deal_comment(from_web_data['keyword'])
#         # print(comment_data)
#         # print('[comment_data]: ', comment_data)
#         emotion_comment = NLP_EMOTION(comment_data)
#         img_path = comment_word_cloud(comment_data)
#         return render_template('home/hot_sight_back.html', code=200, emotion_comment=emotion_comment, img_path=img_path)


from flask import request, render_template, jsonify, send_file
import pandas as pd
from io import BytesIO


@home.route('/hot_sight_back', methods=['GET', 'POST'])
@home_login
def hot_sight_back():
    """
    Facility-weight editor page.

    GET renders the editable weight table seeded with the default values.
    POST with a JSON body supports three actions:
    - 'save':      return the table serialized as CSV text in a JSON payload
    - 'export':    stream the table as a downloadable CSV file
    - 'show_data': log the numeric weight columns server-side and acknowledge

    Any other POST (non-JSON, or unknown action) falls through to the
    normal page render, matching the original behavior.

    Improvements: the duplicated DataFrame->CSV-buffer code is factored
    into _table_to_csv_buffer(), the redundant function-local
    `import pandas` is gone (the module already imports pandas), the
    erratic indentation is normalized, and a large block of commented-out
    supply/demand computation code was removed.
    """
    default_data = [
        {
            "设施类型": "商业服务", "大类": "大型商业", "小类": "",
            "青少年": 0.719042311, "青年": 0.812494667, "中年": 0.835031351, "老年": 0.880566963
        },
        {
            "设施类型": "养老服务", "大类": "养老设施", "小类": "",
            "青少年": 0.0392368520, "青年": 0.047081332, "中年": 0.05566026, "老年": 0.071805102
        },
        {
            "设施类型": "基础教育", "大类": "幼儿园", "小类": "",
            "青少年": 0.504930542, "青年": 0.485589329, "中年": 0.522307893, "老年": 0.565697445
        },
        {
            "设施类型": "文体娱乐", "大类": "文化", "小类": "",
            "青少年": 0.415131957, "青年": 0.482504853, "中年": 0.5077248010, "老年": 0.568905751
        },
        {
            "设施类型": "医疗卫生", "大类": "综合医院", "小类": "",
            "青少年": 0.528807047, "青年": 0.589115503, "中年": 0.637587929, "老年": 0.708727598
        },
    ]

    if request.method == 'POST' and request.is_json:
        data = request.get_json()
        action = data.get('action')

        if action == 'save':
            csv_buffer = _table_to_csv_buffer(data['table_data'])
            return jsonify({
                'status': 'success',
                'message': '数据保存成功',
                'csv_data': csv_buffer.getvalue().decode('utf-8-sig')
            })

        if action == 'export':
            csv_buffer = _table_to_csv_buffer(data['table_data'])
            return send_file(
                csv_buffer,
                mimetype='text/csv',
                as_attachment=True,
                download_name='facility_weights.csv')

        if action == 'show_data':
            # Dump the numeric weight columns to the console for inspection.
            print("===== 前端表格数据 =====")
            result = []
            for item in data['table_data']:
                result.append([
                    float(item['青少年']),
                    float(item['青年']),
                    float(item['中年']),
                    # A missing elderly weight is treated as zero.
                    0.0 if item['老年'] is None else float(item['老年']),
                ])
            print(result)
            return jsonify({
                'status': 'success',
                'message': '供需匹配数据已成功计算完成！！'
            })

    return render_template('home/hot_sight_back.html', table_data=default_data)


def _table_to_csv_buffer(rows):
    """Serialize table rows into an in-memory UTF-8-sig CSV buffer (rewound)."""
    buf = BytesIO()
    pd.DataFrame(rows).to_csv(buf, index=False, encoding='utf-8-sig')
    buf.seek(0)
    return buf


# Extra route used by the front end to jump to the hot-map page.
@home.route('/other_page')
@home_login
def other_page():
    """Render the district hot-map page."""
    return render_template('home/dis_hot_map.html')
# @home.route('/ana_price', methods=['GET', "POST"])
# @home_login
# def ana_price():
#     import geopandas as gpd
#     shp_path = 'D:/科研/城环杯/km flow/km/昆明街区 (1).shp'  # 替换为你的实际shp文件路径
#     gdf = gpd.read_file(shp_path)
#     print(gdf.columns)
#
#     # 定义坐标转换器（假设源坐标系是CGCS2000 / 3-degree Gauss-Kruger zone 35）
#     transformer = Transformer.from_crs("EPSG:4544", "EPSG:4326", always_xy=True)
#
#     # 转换GeoJSON坐标到WGS84
#     def transform_geojson(feature):
#         if feature['geometry']['type'] == 'Polygon':
#             feature['geometry']['coordinates'] = [
#                 [transformer.transform(x, y) for x, y in ring]
#                 for ring in feature['geometry']['coordinates']
#             ]
#         return feature
#
#     # 转换为GeoJSON并转换坐标
#     geojson_data = json.loads(gdf.to_json())
#     geojson_data['features'] = [transform_geojson(f) for f in geojson_data['features']]
#     print(geojson_data)
#     # 准备其他数据
#     to_se_data1 = {}
#     to_se_data = {}
#
#     return render_template('home/ana_price.html',
#                            to_se_data=to_se_data,
#                            to_se_data1=to_se_data1,
#                            geojson_data=json.dumps(geojson_data))
@home.route('/ana_price', methods=['GET', "POST"])
@home_login
def ana_price():
    """Render street polygons (re-projected to WGS84) with cluster labels."""
    import geopandas as gpd
    import pandas as pd
    import json
    from pyproj import Transformer
    from flask import render_template

    # Street-block shapefile and the per-street clustering labels.
    # NOTE(review): both are absolute local Windows paths.
    shp_path = 'D:/科研/城环杯/km flow/km/昆明街区 (1).shp'
    label_path = 'D:/科研/城环杯/km flow/street_clustering_results.csv'

    gdf = gpd.read_file(shp_path)
    df_label = pd.read_csv(label_path)

    # Attach the labels; the inner join drops streets without a label.
    gdf = gdf.merge(df_label, on='Street_ID', how='inner')
    print(gdf.shape)

    # Re-project from CGCS2000 Gauss-Kruger (EPSG:4544) to WGS84.
    to_wgs84 = Transformer.from_crs("EPSG:4544", "EPSG:4326", always_xy=True)

    def reproject_feature(feature):
        # Only Polygon geometries are converted, as in the source data.
        geometry = feature['geometry']
        if geometry['type'] == 'Polygon':
            geometry['coordinates'] = [
                [to_wgs84.transform(x, y) for x, y in ring]
                for ring in geometry['coordinates']
            ]
        return feature

    geojson_data = json.loads(gdf.to_json())
    geojson_data['features'] = [
        reproject_feature(f) for f in geojson_data['features']
    ]

    # Street_ID -> cluster label mapping for the front end.
    to_se_data = dict(zip(gdf['Street_ID'], gdf['Cluster_Label']))

    return render_template('home/ana_price.html',
                           to_se_data=to_se_data,
                           to_se_data1={},
                           geojson_data=json.dumps(geojson_data))


@home.route('/ana_price1', methods=['GET', "POST"])
@home_login
def ana_price1():
    """Render the price-vs-heat chart; GET and POST behave identically."""
    time_hot_data = deal_hot_time2()
    to_se_data = {'xdata': [], 'ydata': []}
    for row in range(len(time_hot_data)):
        to_se_data['xdata'].append(time_hot_data['price'][row])
        to_se_data['ydata'].append(time_hot_data['heatScore'][row])
    return render_template('home/ana_price1.html', to_se_data=to_se_data)


@home.route('/ana_price2', methods=['GET', "POST"])
@home_login
def ana_price2():
    """Render the dual-series chart built from add2()'s three sequences."""
    labels, series_a, series_b = add2()
    print(labels)
    to_se_data = {'xdata': [], 'ydata': [], 'y2data': []}
    for idx in range(len(labels)):
        to_se_data['xdata'].append(str(labels[idx]))
        to_se_data['ydata'].append(series_a[idx])
        to_se_data['y2data'].append(series_b[idx])
    # GET and POST render the same page.
    return render_template('home/ana_price2.html', to_se_data=to_se_data)

@home.route('/ana_price3', methods=['GET', "POST"])
@home_login
def ana_price3():
    """
    GET:  show the sight search page.
    POST: fetch sights for the submitted keyword and chart the 4A/5A split.

    Improvements: identity test (`is None`) instead of `!= None`, and the
    JSON payload is parsed once instead of three times.
    """
    if request.method == "GET":
        return render_template('home/ana_price3.html')

    from_web_data = request.form
    print(from_web_data['keyword'])
    hotel_data = add3(from_web_data['keyword'])
    if hotel_data is None:
        error = '暂无数据'
        return render_template('home/ana_price3.html', error=error)

    sights = json.loads(hotel_data)
    print(sights)
    # Everything that is not rated 5A is counted in the 4A bucket.
    level_counts = {
        '4A': 0,
        '5A': 0,
    }
    for sight in sights:
        if sight['sightLevelStr'] == '5A':
            level_counts['5A'] += 1
        else:
            level_counts['4A'] += 1
    to_e_data = [{'name': key, 'value': value}
                 for key, value in level_counts.items()]
    return render_template('home/ana_price3.html', code=200,
                           hotel_data=sights, to_e_data=to_e_data)
# def dis_time_hot():
#     time_hot_data = deal_hot_time()
#     time_hot_data1 = deal_hot_time1()
#     to_se_data = {}
#     to_se_data['xdata'] = []
#     to_se_data['ydata'] = []
#     to_se_data['pix_data'] = []
#     for hot_data in time_hot_data:
#         to_se_data['xdata'].append(hot_data['lastModifyTime'])
#         to_se_data['ydata'].append(hot_data['content'])
#         to_se_data['pix_data'].append({
#             'name': hot_data['lastModifyTime'],
#             'value': hot_data['content']
#         })
#     to_se_data1 = {}
#     to_se_data1['xdata'] = []
#     to_se_data1['ydata'] = []
#     to_se_data1['pix_data'] = []
#     for k,v in time_hot_data1.items():
#         to_se_data1['xdata'].append(k)
#         to_se_data1['ydata'].append(v)
#         to_se_data1['pix_data'].append({
#             'name':k,
#             'value':v
#         })
#     print("查看一下数据！！！")
#     print(to_se_data1)
#     return render_template('home/dis_time_hot.html', to_se_data=to_se_data,to_se_data1 = to_se_data1)
@home.route('/add', methods=['GET', "POST"])
@home_login
def add():
    """Render the secondary index page."""
    return render_template('home/index1.html')

@home.route('/data_1', methods=['GET', "POST"])
@home_login
def add1():
    """Proxy route: delegate entirely to the data_deal_1 helper."""
    response = data_deal_1()
    return response
@home.route('/data_2', methods=['GET', "POST"])
@home_login
def get_addpie_chart2():
    """Proxy route: delegate entirely to the data_deal_2 helper."""
    response = data_deal_2()
    return response
@home.route('/data_3', methods=['GET', "POST"])
@home_login
def get_addpie_chart3():
    """Proxy route: delegate entirely to the data_deal_3 helper."""
    response = data_deal_3()
    return response
@home.route('/data_4', methods=['GET', "POST"])
@home_login
def get_addpie_chart4():
    """Proxy route: delegate entirely to the data_deal_4 helper."""
    response = data_deal_4()
    return response
@home.route('/data_5', methods=['GET', "POST"])
@home_login
def get_addpie_chart5():
    """Proxy route: delegate entirely to the data_deal_5 helper."""
    response = data_deal_5()
    return response
@home.route('/data_7', methods=['GET', "POST"])
@home_login
def get_addpie_chart6():
    # NOTE(review): this view and get_addpie_chart7 below both register
    # the rule '/data_7' and both call data_deal_7(); given the /data_1..
    # /data_5 sequence, one of them was probably meant to serve data_6 at
    # '/data_6' - confirm the intended mapping before changing it.
    return data_deal_7()
@home.route('/data_7', methods=['GET', "POST"])
@home_login
def get_addpie_chart7():
    # NOTE(review): duplicate of get_addpie_chart6 above - same '/data_7'
    # rule, same data_deal_7() call. Looks like a copy-paste slip; only
    # one of the two endpoints can ever be reached for this URL.
    return data_deal_7()