import torch
import csv
import threading
import time
from flask import Flask, request, jsonify
from flask_cors import CORS
from flask_migrate import Migrate
from flasgger import Swagger
from blueprints import blueprint_list
from config import config_lab as base_config
from components import error_handler
from components import db
from utils.train_model import SimpleNN
from datetime import datetime
import warnings

from models.status import SensorData

warnings.filterwarnings("ignore")
app = Flask(__name__)
app.debug = False
# Enable CORS so browser front-ends on other origins can call the API
cors = CORS(app)
# Load custom configuration
app.config.from_object(base_config)
app.config['SWAGGER'] = {
    'title': '北京化工大学信息学院',
    'description': 'API for SDG_KG',
    'version': '0.0.1',
    'contact': {
        'name': 'Zhiyi Li',
        'email': '2022210522@buct.edu.cn',
        'url': 'https://www.buct.edu.cn/'
    },
    'license': {
        'name': 'Apache 2.0',
        'url': 'https://www.apache.org/licenses/LICENSE-2.0.html'
    }
}
Swagger(app)
# Initialize the database
db.init_app(app)
# Database migration workflow (CLI):
#   flask db init     -- run once to create the migrations folder
#   flask db migrate  -- detect model changes, generate a migration script
#   flask db upgrade  -- apply the script; only models in use are synced
migrate = Migrate(app, db)
# Register all blueprints.  A plain loop, not a list comprehension:
# comprehensions are for building values, not for side effects.
for blueprint in blueprint_list:
    app.register_blueprint(blueprint)
# Register the global exception handler
app.register_error_handler(Exception, error_handler)


# Shared state for the background monitoring thread (set by /start_monitor,
# cleared by the worker itself when it finishes or is told to stop).
monitoring_flag = False
monitoring_thread = None

def load_model(model_path='static/model.pth'):
    """Load the trained SimpleNN regression model for inference.

    Args:
        model_path: Path to the saved state dict.  Defaults to the bundled
            checkpoint, preserving the original call sites.

    Returns:
        A ``SimpleNN(input_size=13, output_size=2)`` instance in eval mode.
    """
    model = SimpleNN(input_size=13, output_size=2)
    # map_location='cpu' so the checkpoint loads even on machines without a GPU
    model.load_state_dict(torch.load(model_path, map_location='cpu'))
    model.eval()
    return model

def parse_datetime(raw):
    """Parse ``'YYYY-MM-DD HH:MM:SS[.ffffff]'`` into a :class:`datetime`.

    Fractional seconds, when present, are discarded (truncated, not rounded),
    matching the CSV's sub-second timestamps to whole-second precision.
    """
    seconds_part, _, _ = raw.partition('.')
    return datetime.strptime(seconds_part, '%Y-%m-%d %H:%M:%S')

def start_background_monitoring():
    """Background worker: replay CSV rows 101-200 as live sensor samples.

    Every 3 seconds one row is taken from ``static/sensor_data.csv``, the 13
    input channels are fed to the model to predict FIC1008 / FI615, and the
    combined record is committed to the database.  Stops early when the global
    ``monitoring_flag`` is cleared; always clears the flag on exit so
    ``/start_monitor`` can launch a new run.
    """
    global monitoring_flag
    model = load_model()
    # Model input columns, in the order SimpleNN was trained on.
    # Hoisted out of the loop: the list is loop-invariant.
    input_cols = [
        'FIC1007', 'FIC1001A', 'FIC1001B', 'FIC1001C', 'FIC1001D',
        'FIC1002A', 'FIC1002B', 'FIC1002C', 'FIC1002D',
        'TIC1001', 'TIC1005', 'TIC1008', 'TIC2005',
    ]
    with app.app_context():  # the thread needs its own Flask application context
        try:
            # Materialize the rows up front so the file handle is closed
            # immediately instead of being held open for the whole replay
            # (up to ~5 minutes with the 3-second sleep per row).
            with open('static/sensor_data.csv', 'r', encoding='utf-8') as f:
                rows = list(csv.DictReader(f))

            for row in rows[100:200]:  # only rows 101-200 are replayed
                if not monitoring_flag:
                    break  # stop was requested

                dt = parse_datetime(row['Datetime'])
                x = [float(row[col]) for col in input_cols]
                x_tensor = torch.tensor([x], dtype=torch.float32)

                with torch.no_grad():  # inference only; no gradients needed
                    y_pred = model(x_tensor).numpy()[0]

                record = SensorData(
                    datetime=dt,
                    FIC1007=x[0], FIC1001A=x[1], FIC1001B=x[2], FIC1001C=x[3], FIC1001D=x[4],
                    FIC1002A=x[5], FIC1002B=x[6], FIC1002C=x[7], FIC1002D=x[8],
                    TIC1001=x[9], TIC1005=x[10], TIC1008=x[11], TIC2005=x[12],
                    FIC1008=float(y_pred[0]), FI615=float(y_pred[1])
                )
                db.session.add(record)
                db.session.commit()

                time.sleep(3)  # one simulated sample every 3 seconds

        except Exception as e:
            print(f"[监测线程异常]：{e}")
        finally:
            monitoring_flag = False  # always reset so a new run can start
@app.route('/data')
def get_data():
    """Return every stored sensor record as a JSON array of dicts."""
    rows = SensorData.query.all()
    payload = [row.to_dict() for row in rows]
    return jsonify(payload)
@app.route('/clear')
def clear_data():
    """Delete all rows from the sensor table and report how many were removed."""
    try:
        deleted = SensorData.query.delete()  # bulk-delete every row
        db.session.commit()
    except Exception as exc:
        db.session.rollback()
        return jsonify({'success': False, 'error': str(exc)}), 500
    return jsonify({'success': True, 'deleted_rows': deleted})
@app.route('/start_monitor')
def start_monitor():
    """Seed the table with the first 100 CSV rows and launch the monitor thread.

    Reads ``static/sensor_data.csv``, bulk-inserts rows 1-100 as historical
    data, records the timestamp of the last seeded row in
    ``app.config['MONITORING_START_TIME']`` (used by ``/get_point_data`` to
    split history from prediction), and starts the background prediction
    thread if one is not already running.

    Returns:
        JSON ``{'success': True, ...}`` on success, or
        ``{'success': False, 'error': ...}`` with HTTP 500 on failure.
    """
    global monitoring_thread, monitoring_flag

    db.create_all()

    # Numeric columns copied verbatim from the CSV into SensorData; using a
    # list + dict-comprehension kwargs avoids 15 repetitive float(...) lines.
    value_cols = [
        'FIC1007', 'FIC1001A', 'FIC1001B', 'FIC1001C', 'FIC1001D',
        'FIC1002A', 'FIC1002B', 'FIC1002C', 'FIC1002D',
        'TIC1001', 'TIC1005', 'TIC1008', 'TIC2005',
        'FIC1008', 'FI615',
    ]
    try:
        with open('static/sensor_data.csv', 'r', encoding='utf-8') as f:
            rows = list(csv.DictReader(f))[:100]  # seed with the first 100 rows

        records = [
            SensorData(
                datetime=parse_datetime(row['Datetime']),
                **{col: float(row[col]) for col in value_cols},
            )
            for row in rows
        ]

        db.session.bulk_save_objects(records)
        db.session.commit()
        # Everything at or before this timestamp counts as "history".
        app.config['MONITORING_START_TIME'] = parse_datetime(rows[-1]['Datetime'])

        if not monitoring_flag:
            monitoring_flag = True
            # daemon=True so a stuck worker cannot keep the process alive
            # after the main thread exits.
            monitoring_thread = threading.Thread(
                target=start_background_monitoring, daemon=True
            )
            monitoring_thread.start()

        return jsonify({'success': True, 'message': '初始化100条数据已写入，监测线程已启动（最多追加100条预测数据）'})
    except Exception as e:
        db.session.rollback()
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/get_point_data', methods=['GET'])
def get_point_data():
    """Return the latest 100 samples of one point, split into history/predict.

    The split timestamp is ``app.config['MONITORING_START_TIME']`` as recorded
    by ``/start_monitor``: samples at or before it are "history", later ones
    are model predictions.
    """
    point = request.args.get('point')
    if point not in SensorData.__table__.columns:
        return jsonify({'success': False, 'error': f'字段 {point} 不存在'}), 400

    try:
        # Split timestamp recorded when monitoring was started.
        start_time = app.config.get('MONITORING_START_TIME')
        if start_time is None:
            return jsonify({'success': False, 'error': '监测开始时间未设置'}), 400

        # Newest 100 rows, restored to chronological order for the client.
        newest = SensorData.query.order_by(SensorData.datetime.desc()).limit(100).all()
        ordered = sorted(newest, key=lambda rec: rec.datetime)

        history_rows = [rec for rec in ordered if rec.datetime <= start_time]
        predict_rows = [rec for rec in ordered if rec.datetime > start_time]

        def serialize(recs):
            return [{'datetime': rec.datetime.isoformat(), 'value': getattr(rec, point)} for rec in recs]

        return jsonify({
            'success': True,
            'point': point,
            'history': serialize(history_rows),
            'predict': serialize(predict_rows),
        })

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/get_latest_point_data', methods=['GET'])
def get_latest_point_data():
    """Return the latest 100 samples of one point in chronological order."""
    point = request.args.get('point')

    # Reject names that are not columns of the SensorData model.
    if point not in SensorData.__table__.columns:
        return jsonify({'success': False, 'error': f'字段 {point} 不存在'}), 400

    try:
        # Newest 100 rows (descending query), then put back in time order.
        newest = (
            SensorData.query
            .order_by(SensorData.datetime.desc())
            .limit(100)
            .all()
        )
        ordered = sorted(newest, key=lambda rec: rec.datetime)

        # Project each record down to the requested point.
        series = [
            {'datetime': rec.datetime.isoformat(), 'value': getattr(rec, point)}
            for rec in ordered
        ]

        return jsonify({'success': True, 'point': point, 'data': series})

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500


@app.route('/')
def hello_world():
    """Trivial liveness endpoint."""
    greeting = 'Hello World!'
    return greeting

if __name__ == '__main__':
    # Development entry point; use a proper WSGI server in production.
    app.run(debug=False)
