import json
import re

from flask import Flask, send_file, jsonify, g
from tensorflow import keras
from sklearn.metrics import mean_squared_error as mse # mse
from sklearn.metrics import mean_absolute_error as mae # mae
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from flask import request
from flask import jsonify
from scipy.special import softmax
from transformers import AutoTokenizer,AutoModelForSequenceClassification
import os
import csv
from statsmodels.tsa.stattools import adfuller
from sklearn.metrics import r2_score
from skimage.restoration import denoise_wavelet
import time

# Flask application object; all routes below attach to this instance.
app = Flask(__name__)

# Absolute root under which the upload sub-directories are created
# (see the *_UPLOADS app.config assignments near the bottom of the file).
baseSavePath = "/Users/fanyc/PycharmProjects/pytorch_test/MyPrediction/RegressionModel/deeplearning_model"

# Demo request counter read by the '/' and '/add' routes.
app.config['counter'] = 0

@app.route('/')
def index():
    """Demo counter endpoint: bump the app-level counter by 10 per request.

    Fix: the original copied the counter into a local variable and never
    wrote the increment back, so the reported count was always 10; the
    increment is now persisted in app.config across requests.
    """
    app.config['counter'] += 10
    return f"Request Count: {app.config['counter']}"

@app.route('/add')
def indexAdd():
    """Report the current value of the app-level request counter."""
    current = app.config['counter']
    return f'Request Count: {current}'

@app.route('/add2')
def indexAdd2():
    """Demo endpoint using the per-request ``flask.g`` object.

    Fix: ``g.counter`` was incremented without ever being initialised,
    which raised AttributeError on every request.  It now defaults to 0.
    NOTE(review): ``g`` is reset for each request, so the reported value is
    always 20; use app.config (see '/') for a value that persists.
    """
    g.counter = getattr(g, 'counter', 0) + 20
    return f'Request Count: {g.counter}'


@app.route("/getdata")
def model_predict():
    """Evaluate the pretrained Keras model on the held-out test split.

    Returns a JSON object with MSE, R2 and MAE of the predictions.

    Fixes: removed a second, unused ``model.predict`` call (wasted a full
    inference pass) and a redundant function-local sklearn import that
    shadowed the file-level imports; metric values are cast to plain
    floats so Flask's JSON encoder accepts them.
    """
    model = keras.models.load_model('./pretraining_models')
    X2_test, y2_test = data_precess()
    # flatten(): the model outputs shape (n, 1); the metrics expect (n,).
    y2_pred = model.predict(X2_test).flatten()
    return {
        'mse_val': float(mse(y2_test, y2_pred)),
        'r2_val': float(r2_score(y2_test, y2_pred)),
        'mae_val': float(mae(y2_test, y2_pred)),
    }

def data_precess():
    """Build the scaled sliding-window test split from the big_data CSV.

    Returns ``(X2_test, y2_test)``: 7-timestep windows over 9 features and
    the next-step scaled 'close' value.  Windows from index 744 onward form
    the test portion (the first 744 were the train/validation windows used
    at training time).

    Fix: removed dead code — the unused ``dates`` column conversion and the
    train/validation splits that were computed but never returned.
    NOTE(review): the scaler is fit on the full series, including the test
    rows, which leaks test statistics into scaling; kept as-is to match the
    training pipeline, but worth confirming.
    """
    df = pd.read_csv('../data_process/big_data.csv')
    feature_cols = list(df)[1:10]  # skip the leading 'date' column
    df = df[feature_cols].astype(float)
    df_scaled = StandardScaler().fit_transform(df)
    df_scaled = pd.DataFrame(df_scaled, columns=['close', 'total_cases', 'new_cases_smoothed', 'total_deaths',
                                                 'new_deaths_smoothed', 'stringency_index', 'open', 'highest',
                                                 'lowest'])
    x2, y2 = df_to_X_y(df_scaled, window_size=7)
    return x2[744:], y2[744:]


def df_to_X_y(df, window_size=14):
    """Convert a DataFrame into sliding-window samples for sequence models.

    Each sample ``X[i]`` is the window of ``window_size`` consecutive rows
    starting at row i; the label ``y[i]`` is the first column of the row
    that immediately follows that window.

    Args:
        df: DataFrame of numeric features; column 0 is the prediction target.
        window_size: lookback length of each window.

    Returns:
        (X, y) numpy arrays of shape (n - w, w, n_features) and (n - w,).

    Fix: replaced the redundant per-row copy comprehension with direct
    numpy slices (np.array stacks them into the 3-D batch) and normalised
    the indentation to 4 spaces.
    """
    data = df.to_numpy()
    n = len(data) - window_size
    X = np.array([data[i:i + window_size] for i in range(n)])
    y = np.array([data[i + window_size][0] for i in range(n)])
    return X, y

def getStockTestDataset(filePath, test_start=744):
    """Load a stock CSV and return the scaled sliding-window test split.

    Args:
        filePath: CSV with a leading 'date' column followed by 9 features.
        test_start: index of the first window belonging to the test set
            (default 744, matching the split used during training; new
            parameter is backward-compatible).

    Returns:
        (X2_test, y2_test): 7-timestep windows over 9 features and the
        next-step scaled 'close' values.

    Fix: removed the unused ``dates`` conversion.
    NOTE(review): the scaler is fit on the whole file, including test rows;
    kept as-is to match the training pipeline.
    """
    df = pd.read_csv(filePath)
    feature_cols = list(df)[1:10]  # drop the 'date' column
    df = df[feature_cols].astype(float)
    df_scaled = StandardScaler().fit_transform(df)
    df_scaled = pd.DataFrame(df_scaled, columns=['close', 'total_cases', 'new_cases_smoothed', 'total_deaths',
                                                 'new_deaths_smoothed', 'stringency_index', 'open', 'highest',
                                                 'lowest'])
    x2, y2 = df_to_X_y(df_scaled, window_size=7)
    return x2[test_start:], y2[test_start:]

def getVirusTestDataset(filePath):
    """Load a virus CSV and return the scaled sliding-window test split.

    The last 15% of the windows (by row count) form the test set.  Column 0
    is assumed to be 'date' and is dropped; all remaining columns are
    standardised and windowed with a 7-step lookback.

    Fix: removed the unused ``dates`` conversion and the duplicate
    ``col``/``cols`` computation (both were ``df.columns[1:]``).
    NOTE(review): scaler is fit on the whole file including test rows;
    kept to match the training pipeline.
    """
    df = pd.read_csv(filePath)
    testBegin = int(df.shape[0] * 0.85)  # index of the first test window
    feature_cols = df.columns[1:]        # everything except 'date'
    df = df[feature_cols].astype(float)
    df_scaled = StandardScaler().fit_transform(df)
    df_scaled = pd.DataFrame(df_scaled, columns=feature_cols)
    x2, y2 = df_to_X_y(df_scaled, window_size=7)
    return x2[testBegin:], y2[testBegin:]

def adf_test(timeseries):
    """Run the Augmented Dickey-Fuller stationarity test on a series.

    Returns:
        (dfoutput, result):
          - dfoutput: pandas Series with labelled statistics plus the
            critical values.
          - result: flat list [test statistic, p-value, #lags used,
            #observations used, critical values...] for JSON consumption.

    Fix: the original appended the test statistic to ``result`` four times
    instead of the four distinct summary values from ``dftest[0:4]``.
    """
    print('ADF检验结果:')
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4],
                         index=['Test Statistic', 'p-value', 'Number of Lags Used', 'Number of Observations Used'])
    result = list(dftest[0:4])
    for key, value in dftest[4].items():
        dfoutput['Critical Value (%s)' % key] = value
        result.append(value)
    return dfoutput, result


def read_csv_column(file_path, column_name):
    """Return every value of ``column_name`` from a CSV file as a list of
    strings, or None (after printing a notice) when the column is absent."""
    with open(file_path, 'r', newline='', encoding='utf-8') as csvfile:
        reader = csv.DictReader(csvfile)
        if column_name in reader.fieldnames:
            return [record[column_name] for record in reader]
    print(f"Column '{column_name}' not found.")
    return None

# Lazily-initialised HuggingFace artefacts, loaded once per process.
_SENTIMENT_CACHE = {}


def polarity_roberta_classification(example):
    """Classify the sentiment of ``example`` with CardiffNLP's RoBERTa model.

    Args:
        example: text string (or anything the tokenizer accepts).

    Returns:
        [neg, neu, pos] softmax probabilities
        (index 0 = negative, 1 = neutral, 2 = positive).

    Fix: the tokenizer and model were re-loaded from the hub on every call;
    they are now cached at module level after the first use.
    """
    if not _SENTIMENT_CACHE:
        MODEL = "cardiffnlp/twitter-roberta-base-sentiment"
        _SENTIMENT_CACHE['tokenizer'] = AutoTokenizer.from_pretrained(MODEL)
        _SENTIMENT_CACHE['model'] = AutoModelForSequenceClassification.from_pretrained(MODEL)
    tokenizer = _SENTIMENT_CACHE['tokenizer']
    model = _SENTIMENT_CACHE['model']
    encode_text = tokenizer(example, return_tensors='pt')
    output = model(**encode_text)
    scores = softmax(output[0][0].detach().numpy())
    return [scores[0], scores[1], scores[2]]

def getScoreResult(scoreList):
    """Map a [neg, neu, pos] score list to its Chinese sentiment label.

    Returns None if the argmax falls outside the three known classes
    (matches the original fall-through behaviour).
    """
    labels = {0: '消极', 1: '中性', 2: '积极'}
    return labels.get(scoreList.index(max(scoreList)))

@app.route('/test/score', methods=['POST'])
def sentenceScore():
    """Score a single sentence posted as form field 'sentence'.

    Returns the [neg, neu, pos] score list, stringified and then JSON
    encoded (kept as-is to preserve the response format clients expect).
    """
    scores = polarity_roberta_classification(request.form['sentence'])
    return json.dumps(str(scores))

@app.route('/test/csv/score', methods=['POST'])
def sentenceCsv():
    """Score every cell of an uploaded CSV of sentences.

    The upload (form field 'filename') is saved as sentences.csv, each cell
    is run through the RoBERTa sentiment model, the per-sentence results
    are written to ./file-upload/result.csv, and the list of
    {"text", "score"} dicts is returned stringified-then-JSON-encoded
    (kept as-is to preserve the response format clients expect).
    """
    rows = []
    if request.method == 'POST' and request.files:
        upload = request.files['filename']
        save_path = os.path.join(app.config['FILE_UPLOADS'], 'sentences.csv')
        upload.save(save_path)
        with open(save_path) as fh:
            rows = [row for row in csv.reader(fh)]

    # Flatten the list of CSV rows into a single list of sentences.
    sentences = sum(rows, [])
    result = []
    text = []
    score = []
    scoreResult = []
    for sentence in sentences:
        scores = polarity_roberta_classification(sentence)
        result.append({
            "text": sentence,
            "score": scores
        })
        text.append(sentence)
        score.append(scores)
        scoreResult.append(getScoreResult(scores))
    out = pd.DataFrame({'text': text, 'score_result': score, 'classification': scoreResult})
    out.to_csv('./file-upload/result.csv', index=False, sep=',')
    return json.dumps(str(result))

@app.route('/test/download')
def download():
    """Serve the latest sentiment result CSV as a file download."""
    result_path = './file-upload/result.csv'
    return send_file(result_path, as_attachment=True)


@app.route('/test/getseries', methods=['POST'])
def checkSeries():
    """Save an uploaded time-series CSV and echo it back column by column.

    Response JSON: {"title": [headers...], "<col>": [values...], ...}.
    """
    result = {}
    if request.files:
        upload = request.files['filename']
        save_path = os.path.join(app.config['SERIES_UPLOADS'], 'series.csv')
        upload.save(save_path)
        with open(save_path) as fh:
            result["title"] = next(csv.reader(fh))
        for column in result["title"]:
            result[column] = read_csv_column(save_path, column)
    return jsonify(result)
# @app.route('/test/file', methods=['POST'])
# def sentenceScore():
#     return ''

@app.route('/test/getdiff', methods=['GET'])
def getDiffSeries():
    """First-difference every numeric column of the uploaded series and run
    an ADF stationarity test on each differenced column.

    Response JSON: {"title": [...], "<col>": [diffs...],
    "<col>_adf": [adf summary values...]} for every non-date column.

    Fix: the DataFrame was read from a hard-coded relative path while the
    header was read from the configured upload path, so the two could point
    at different files depending on the working directory.  Both now use
    the configured SERIES_UPLOADS path.  The diff is also computed once per
    column instead of twice.
    """
    result = dict()
    filePath = os.path.join(app.config['SERIES_UPLOADS'], 'series.csv')
    df = pd.read_csv(filePath)
    with open(filePath) as file:
        result["title"] = next(csv.reader(file))
    for aTitle in result["title"]:
        if aTitle == 'date':
            continue
        diffed = np.diff(df[aTitle])
        result[aTitle] = diffed.tolist()
        _, resultList = adf_test(diffed)
        result[aTitle + '_adf'] = resultList
    return jsonify(result)

@app.route('/test/getdenoisy', methods=['GET'])
def getDenoisyData():
    """Wavelet-denoise every numeric column of the uploaded series.

    Response JSON: {"title": [...], "<col>": [denoised values...]}.

    Fix: same path inconsistency as /test/getdiff — the DataFrame came from
    a hard-coded relative path while the header came from the configured
    upload path; both now use the configured SERIES_UPLOADS path.
    NOTE(review): rescale_sigma is passed as the string 'True' (truthy),
    not the boolean True — kept as-is, but worth confirming the intent.
    """
    result = dict()
    filePath = os.path.join(app.config['SERIES_UPLOADS'], 'series.csv')
    df = pd.read_csv(filePath)
    with open(filePath) as file:
        result["title"] = next(csv.reader(file))
    for aTitle in result["title"]:
        if aTitle == 'date':
            continue
        print('查看降噪前数据', df[aTitle])
        denoised = denoise_wavelet(df[aTitle], method='BayesShrink', mode='soft',
                                   wavelet_levels=2, wavelet='sym8', rescale_sigma='True')
        result[aTitle] = denoised.tolist()
    return jsonify(result)


def _evaluate_windows(model_path, x_test, y_test):
    """Load a saved Keras model, predict on the test windows, and package
    predictions plus MSE/MAE/R2 into a JSON-friendly dict."""
    model = keras.models.load_model(model_path)
    y_pred = model.predict(x_test).flatten()
    return {
        'y_pred': y_pred.tolist(),
        'y_test': y_test.tolist(),
        # float() casts: numpy scalars are not JSON-serialisable by default.
        'mse': float(mse(y_test, y_pred)),
        'mae': float(mae(y_test, y_pred)),
        'r2': float(r2_score(y_test, y_pred)),
    }


@app.route('/test/premodel/result', methods=['POST'])
def getPremodelResult():
    """Evaluate a pretrained model on an uploaded test dataset.

    Form fields: 'type' (model category), 'modelname', 'virus', plus the
    file upload 'filename'.  Depending on 'type', the upload is saved under
    the virus or stock upload directory and evaluated against the matching
    pretrained model.

    Fix: the two branches duplicated near-identical evaluation code, now
    factored into _evaluate_windows; metric values are cast to plain floats
    so jsonify can serialise them reliably.
    """
    typeName = request.form['type']
    modelName = request.form['modelname']
    file = request.files['filename']
    virus = request.form['virus']
    modelPath = './pretraining_models'

    if typeName == '病毒模型':
        modelPath += '/virus/' + virus + '/' + modelName
        file.save(os.path.join(app.config['VIRUS_UPLOADS'] + '/' + virus + '/', 'virus_data.csv'))
        x2_test, y2_test = getVirusTestDataset(app.config['VIRUS_UPLOADS'] + '/' + virus + '/virus_data.csv')
        return jsonify(_evaluate_windows(modelPath, x2_test, y2_test))
    if typeName == '金融模型':
        modelPath += '/stock/' + modelName
        file.save(os.path.join(app.config['STOCK_UPLOADS'], 'big_data.csv'))
        x2_test, y2_test = getStockTestDataset(os.path.join(app.config['STOCK_UPLOADS'] + '/big_data.csv'))
        return jsonify(_evaluate_windows(modelPath, x2_test, y2_test))
    return '未命中接口'


# @app.route('/test/model/result', methods=['POST'])
# def getModelResult():
#     result = dict()
#     forecastLength = request.form['forecast']
#     modelName = request.form['modelname']
#     windowLength = request.files['windows']
#     uploaded_file = request.files['filename']
#     uploaded_file.save(os.path.join(app.config['MODEL_TRAIN_UPLOADS'], 'dataset.csv'))
#     filePath = os.path.join(app.config['MODEL_TRAIN_UPLOADS'], 'dataset.csv')
#     df = pd.read_csv(filePath)
#     dates = pd.to_datetime(df['date'])
#     cols = list(df)[1:]
#     df = df[cols].astype(float)
#     scaler = StandardScaler()
#     scaler = scaler.fit(df)
#     df_scaled = scaler.transform(df)
#     df_scaled = pd.DataFrame(df_scaled, columns=['close', 'total_cases', 'new_cases_smoothed', 'total_deaths',
#                                                  'new_deaths_smoothed', 'stringency_index', 'open', 'highest',
#                                                  'lowest'])
#     x2, y2 = df_to_X_y(df_scaled, window_size=windowLength)
#
#     return 0

# Upload directories, all rooted at baseSavePath.
app.config['FILE_UPLOADS'] = f"{baseSavePath}/file-upload"
app.config['SERIES_UPLOADS'] = f"{baseSavePath}/file-upload/series-upload"
app.config['STOCK_UPLOADS'] = f"{baseSavePath}/file-upload/stock"
app.config['VIRUS_UPLOADS'] = f"{baseSavePath}/file-upload/virus"
app.config['MODEL_TRAIN_UPLOADS'] = f"{baseSavePath}/file-upload/model-train"

def numerical_sort(string):
    """Sort key: the first run of digits in ``string`` as an int.

    Fix: the original returned None for strings containing no digits, and
    None is not orderable against int in Python 3, so ``sorted(...,
    key=numerical_sort)`` would raise TypeError on such input.  Digit-less
    strings now sort first via a -1 sentinel.
    """
    numbers = re.findall(r'\d+', string)
    return int(numbers[0]) if numbers else -1

@app.route('/test/analysis', methods=['PUT'])
def analysisResult():
    """Create a new timestamp-named marker file under ./analysis_data.

    Keeps the directory bounded: when more than 5 files exist, the two
    whose numeric (timestamp) stems are smallest — i.e. the oldest — are
    deleted first.  Returns the new timestamp as JSON.

    Fix: the new file is created via a context manager instead of a bare
    open()/close() pair, so the handle is released even if open() partially
    fails; the stem collection is a comprehension instead of an append loop.
    """
    entries = os.listdir("./analysis_data")
    if len(entries) > 5:
        # Sort filename stems by their numeric value (timestamps) ascending.
        stems = sorted((name.split(".")[0] for name in entries), key=numerical_sort)
        for stem in stems[:2]:
            print('查看待删除的文件名：{}'.format(stem))
            os.remove("./analysis_data/{name}.txt".format(name=stem))

    # Name the new (empty) file after the current Unix timestamp.
    timeStamp = int(time.time())
    with open("./analysis_data/{time}.txt".format(time=timeStamp), 'w'):
        pass
    return jsonify(timeStamp)

if __name__ == '__main__':
    # Launch the Flask development server when executed directly.
    app.run()

