#pragma once
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <memory>
#include <numeric>
#include <random>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include <immintrin.h>

/// Multinomial logistic-regression (softmax) classifier over sliding windows
/// of daily market data.  Training uses full-batch gradient descent with
/// momentum and L2 regularization; the inner dot products are AVX-accelerated.
class AIPredictor {
public:
    // Sliding-window sizes, in days: each sample spans `daysLearn` consecutive
    // days; the first (daysLearn - daysLast) days become input features and
    // the final `daysLast` days determine the label.
    uint32_t daysLearn = 10;
    uint32_t daysLast = 5;

private:
    std::vector<std::vector<double>> weights;   // [class][feature]
    std::vector<double> bias;                   // [class]
    double learning_rate;
    int epochs;                                 // maximum training epochs
    double lambda;                              // L2 regularization strength

    // Per-feature statistics captured by normalize() and reused in predict().
    // NOTE: renamed from `std` -> `std_dev`: a member named `std` shadows the
    // namespace in unqualified contexts.  Serialized layout is unchanged.
    std::vector<double> mean;
    std::vector<double> std_dev;
    std::vector<int> class_labels;              // sorted unique training labels
    std::unordered_map<int, size_t> label_to_index;

public:
    /// @param lr  gradient-descent step size
    /// @param ep  maximum number of epochs
    /// @param reg L2 penalty coefficient
    AIPredictor(double lr = 0.01, int ep = 1024, double reg = 0.01)
        : learning_rate(lr), epochs(ep), lambda(reg) {
    }

    /// Numerically stable softmax (subtracts the max logit before exp).
    /// Returns an empty vector for empty input (max_element on an empty
    /// range would be UB).
    std::vector<double> softmax(const std::vector<double>& logits) {
        std::vector<double> probs(logits.size());
        if (logits.empty()) return probs;
        double max_logit = *std::max_element(logits.begin(), logits.end());
        double sum_exp = 0.0;
        for (size_t i = 0; i < logits.size(); ++i) {
            probs[i] = std::exp(logits[i] - max_logit);
            sum_exp += probs[i];
        }
        for (auto& prob : probs) {
            prob /= sum_exp;
        }
        return probs;
    }

    /// Builds (features, labels) pairs from per-subject daily series.
    /// For each window, each day's open/close/high/low/volume is divided by
    /// the window's first-day value; the binary label is 1 when the open of
    /// the window's last day exceeds the high of the first "label" day.
    /// Assumes the five per-subject arrays are parallel and equally sized
    /// (only closedPrices' length is checked) -- TODO confirm with Subject.
    /// NOTE(review): test_ratio is currently unused; kept for interface
    /// compatibility with existing callers.
    void prepare_data(
        const std::vector<std::shared_ptr<Subject>>& subjects,  // const&: avoid copying the vector of shared_ptr
        std::vector<std::vector<double>>& train_features,
        std::vector<int>& train_labels,
        double test_ratio = 0.2)
    {
        (void)test_ratio;
        uint32_t inputDays = daysLearn - daysLast;
        for (const auto& subjectIt : subjects)
        {
            uint32_t subjectLength = subjectIt->closedPrices.size();
            if (subjectLength < daysLearn)
            {
                continue;
            }
            for (uint32_t i = 0; i < subjectLength - daysLearn; i++)
            {
                auto startPrices0 = subjectIt->startPrices[i];
                auto closedPrices0 = subjectIt->closedPrices[i];
                auto highPrices0 = subjectIt->highPrices[i];
                auto lowPrices0 = subjectIt->lowPrices[i];
                auto tradedNum0 = subjectIt->tradedNum[i];

                // Skip windows with a zero base value: dividing by it would
                // yield inf/NaN features (or UB for integral series).
                if (startPrices0 == 0 || closedPrices0 == 0 ||
                    highPrices0 == 0 || lowPrices0 == 0 || tradedNum0 == 0)
                {
                    continue;
                }

                std::vector<double> input;
                input.reserve(static_cast<size_t>(inputDays) * 5);  // 5 features per day

                uint32_t j = 0;
                for (j = i; j < i + inputDays; j++)
                {
                    // Explicit double division: the series element types are
                    // not visible here and could be integral, in which case
                    // the original code performed integer division.
                    input.push_back(static_cast<double>(subjectIt->startPrices[j]) / startPrices0);
                    input.push_back(static_cast<double>(subjectIt->closedPrices[j]) / closedPrices0);
                    input.push_back(static_cast<double>(subjectIt->highPrices[j]) / highPrices0);
                    input.push_back(static_cast<double>(subjectIt->lowPrices[j]) / lowPrices0);
                    input.push_back(static_cast<double>(subjectIt->tradedNum[j]) / tradedNum0);
                }

                auto dataLast2Idx = j;                  // j == i + inputDays: first day of the label span
                auto dataLast1Idx = j + daysLast - 1;   // last day of the window
                double last2High = static_cast<double>(subjectIt->highPrices[dataLast2Idx]);
                double last1Open = static_cast<double>(subjectIt->startPrices[dataLast1Idx]);
                if (last2High == 0.0)
                {
                    continue;  // would divide by zero below
                }
                double incRate = (last1Open - last2High) / last2High;
                // Binary label (fixed "lable" typo; labels are ints, no
                // double->int narrowing).
                const int label = incRate > 0.0 ? 1 : 0;

                train_labels.push_back(label);
                train_features.push_back(std::move(input));
            }
        }
    }

    /// Z-score normalization in place; stores per-feature mean and
    /// population standard deviation for later reuse by predict().
    /// No-op on empty input (the original dereferenced features[0]).
    void normalize(std::vector<std::vector<double>>& features) {
        if (features.empty()) return;
        const size_t n_samples = features.size();
        const size_t n_features = features[0].size();

        mean.assign(n_features, 0.0);
        std_dev.assign(n_features, 0.0);

        for (const auto& sample : features) {
            for (size_t j = 0; j < n_features; ++j) {
                mean[j] += sample[j];
            }
        }
        for (auto& m : mean) m /= n_samples;

        for (const auto& sample : features) {
            for (size_t j = 0; j < n_features; ++j) {
                const double d = sample[j] - mean[j];
                std_dev[j] += d * d;   // x*x instead of std::pow(x, 2)
            }
        }
        for (auto& s : std_dev) s = std::sqrt(s / n_samples);

        for (auto& sample : features) {
            for (size_t j = 0; j < n_features; ++j) {
                // 1e-8 guards zero variance; predict() must use the same form.
                sample[j] = (sample[j] - mean[j]) / (std_dev[j] + 1e-8);
            }
        }
    }

    /// Full-batch gradient descent with momentum (beta = 0.9) and L2
    /// regularization.  Early-stops once the total loss is unchanged
    /// (within 1e-6) for 100 consecutive epochs.
    /// No-op on empty or malformed (size-mismatched) input.
    void train(const std::vector<std::vector<double>>& features,
        const std::vector<int>& labels)
    {
        if (features.empty() || labels.size() != features.size()) return;

        std::set<int> unique_labels(labels.begin(), labels.end());
        class_labels.assign(unique_labels.begin(), unique_labels.end());

        label_to_index.clear();  // drop stale entries from a previous train()/load()
        for (size_t i = 0; i < class_labels.size(); ++i) {
            label_to_index[class_labels[i]] = i;
        }

        const uint32_t stopStride = 100;
        double last_total_loss = 0.0;
        uint32_t sameCount = 0;

        const size_t n_samples = features.size();
        const size_t n_features = features[0].size();
        const size_t n_classes = class_labels.size();

        // assign (not resize) so retraining with different dimensions
        // starts from a clean slate instead of keeping stale rows.
        weights.assign(n_classes, std::vector<double>(n_features, 0.0));
        bias.assign(n_classes, 0.0);

        // Small random initialization to break symmetry between classes.
        std::random_device rd;
        std::mt19937 gen(rd());
        std::uniform_real_distribution<> dis(-0.01, 0.01);
        for (auto& w : weights) {
            for (auto& v : w) v = dis(gen);
        }
        for (auto& b : bias) b = dis(gen);

        std::vector<std::vector<double>> weight_momentum(n_classes, std::vector<double>(n_features, 0.0));
        std::vector<double> bias_momentum(n_classes, 0.0);
        const double beta = 0.9;

        for (int ep = 0; ep < epochs; ++ep) {
            double total_loss = 0.0;
            std::vector<std::vector<double>> grad_weights(n_classes, std::vector<double>(n_features, 0.0));
            std::vector<double> grad_bias(n_classes, 0.0);

            for (size_t i = 0; i < n_samples; ++i) {
                std::vector<double> logits(n_classes);
                for (size_t c = 0; c < n_classes; ++c) {
                    // AVX dot product, 4 doubles per iteration.
                    __m256d sum = _mm256_setzero_pd();
                    size_t j;
                    for (j = 0; j + 3 < n_features; j += 4) {
                        __m256d w = _mm256_loadu_pd(&weights[c][j]);
                        __m256d f = _mm256_loadu_pd(&features[i][j]);
                        sum = _mm256_add_pd(sum, _mm256_mul_pd(w, f));
                    }
                    double temp[4];
                    _mm256_storeu_pd(temp, sum);
                    logits[c] = temp[0] + temp[1] + temp[2] + temp[3];

                    // Scalar tail for the remaining (< 4) elements.
                    for (; j < n_features; ++j) {
                        logits[c] += weights[c][j] * features[i][j];
                    }
                    logits[c] += bias[c];
                }
                std::vector<double> probs = softmax(logits);
                size_t label_index = label_to_index[labels[i]];

                // Cross-entropy loss; 1e-8 avoids log(0).
                double loss = -std::log(probs[label_index] + 1e-8);
                total_loss += loss;

                // Softmax gradient: p_c - 1{c == true class}, plus the L2
                // term (accumulated per sample, then averaged below).
                for (size_t c = 0; c < n_classes; ++c) {
                    double delta = probs[c] - (c == label_index ? 1.0 : 0.0);
                    for (size_t j = 0; j < n_features; ++j) {
                        grad_weights[c][j] += delta * features[i][j] + lambda * weights[c][j];
                    }
                    grad_bias[c] += delta;
                }
            }

            // Momentum update on the batch-averaged gradients.
            for (size_t c = 0; c < n_classes; ++c) {
                for (size_t j = 0; j < n_features; ++j) {
                    weight_momentum[c][j] = beta * weight_momentum[c][j] +
                        (1 - beta) * grad_weights[c][j] / n_samples;
                    weights[c][j] -= learning_rate * weight_momentum[c][j];
                }
                bias_momentum[c] = beta * bias_momentum[c] +
                    (1 - beta) * grad_bias[c] / n_samples;
                bias[c] -= learning_rate * bias_momentum[c];
            }

            // Plateau-based early stopping: `stopStride` consecutive epochs
            // with an (almost) unchanged total loss terminates training.
            if (std::abs(last_total_loss - total_loss) < 1e-6) {
                sameCount++;
                if (stopStride == sameCount) {
                    break;
                }
            }
            else {
                sameCount = 0;
                last_total_loss = total_loss;
            }
        }
    }

    /// Returns the most probable class label for one raw (unnormalized)
    /// feature vector.  Must be called after train()/load().
    int predict(const std::vector<double>& features) {
        std::vector<double> normalized(features.size());
        for (size_t j = 0; j < features.size(); ++j) {
            // Must match normalize() exactly: (x - mean) / (std + 1e-8).
            // The previous max(std, 1e-8) variant diverged from training
            // for near-constant features.
            normalized[j] = (features[j] - mean[j]) / (std_dev[j] + 1e-8);
        }
        std::vector<double> logits(weights.size());
        for (size_t c = 0; c < weights.size(); ++c) {
            logits[c] = std::inner_product(weights[c].begin(), weights[c].end(),
                normalized.begin(), bias[c]);
        }
        std::vector<double> probs = softmax(logits);
        size_t max_index = std::distance(probs.begin(), std::max_element(probs.begin(), probs.end()));
        return class_labels[max_index];
    }

    /// Serializes the full model state to a binary file.  Silently returns
    /// if the file cannot be opened.  NOTE(review): the format writes raw
    /// size_t/int bytes, so files are not portable across architectures.
    void save(const std::string& filename) {
        std::ofstream file(filename, std::ios::binary);
        if (!file.is_open()) return;

        file.write(reinterpret_cast<const char*>(&daysLearn), sizeof(daysLearn));
        file.write(reinterpret_cast<const char*>(&daysLast), sizeof(daysLast));

        file.write(reinterpret_cast<const char*>(&learning_rate), sizeof(learning_rate));
        file.write(reinterpret_cast<const char*>(&epochs), sizeof(epochs));
        file.write(reinterpret_cast<const char*>(&lambda), sizeof(lambda));

        save_vector2d(file, weights);

        save_vector(file, bias);
        save_vector(file, mean);
        save_vector(file, std_dev);
        save_vector(file, class_labels);

        size_t map_size = label_to_index.size();
        file.write(reinterpret_cast<const char*>(&map_size), sizeof(map_size));
        for (const auto& pair : label_to_index) {
            file.write(reinterpret_cast<const char*>(&pair.first), sizeof(pair.first));
            file.write(reinterpret_cast<const char*>(&pair.second), sizeof(pair.second));
        }
    }

    /// Restores model state written by save().  Silently returns if the
    /// file cannot be opened or the header reads fail.
    void load(const std::string& filename) {
        std::ifstream file(filename, std::ios::binary);
        if (!file.is_open()) return;

        file.read(reinterpret_cast<char*>(&daysLearn), sizeof(daysLearn));
        file.read(reinterpret_cast<char*>(&daysLast), sizeof(daysLast));

        file.read(reinterpret_cast<char*>(&learning_rate), sizeof(learning_rate));
        file.read(reinterpret_cast<char*>(&epochs), sizeof(epochs));
        file.read(reinterpret_cast<char*>(&lambda), sizeof(lambda));

        load_vector2d(file, weights);

        load_vector(file, bias);
        load_vector(file, mean);
        load_vector(file, std_dev);
        load_vector(file, class_labels);

        label_to_index.clear();  // avoid merging with stale entries
        size_t map_size = 0;
        if (!file.read(reinterpret_cast<char*>(&map_size), sizeof(map_size))) {
            return;  // truncated file: don't loop on a garbage count
        }
        for (size_t i = 0; i < map_size; ++i) {
            int key;
            size_t value;
            file.read(reinterpret_cast<char*>(&key), sizeof(key));
            file.read(reinterpret_cast<char*>(&value), sizeof(value));
            label_to_index[key] = value;
        }
    }

private:
    // Writes [size_t count][raw elements] for a trivially-copyable T.
    template<typename T>
    void save_vector(std::ofstream& file, const std::vector<T>& vec) {
        size_t size = vec.size();
        file.write(reinterpret_cast<const char*>(&size), sizeof(size));
        file.write(reinterpret_cast<const char*>(vec.data()), size * sizeof(T));
    }

    // Reads the format produced by save_vector().
    template<typename T>
    void load_vector(std::ifstream& file, std::vector<T>& vec) {
        size_t size = 0;
        if (!file.read(reinterpret_cast<char*>(&size), sizeof(size))) {
            vec.clear();
            return;  // truncated file: don't resize to a garbage count
        }
        vec.resize(size);
        file.read(reinterpret_cast<char*>(vec.data()), size * sizeof(T));
    }

    // Writes [size_t outer count] followed by each inner vector.
    void save_vector2d(std::ofstream& file, const std::vector<std::vector<double>>& vec) {
        size_t outer_size = vec.size();
        file.write(reinterpret_cast<const char*>(&outer_size), sizeof(outer_size));
        for (const auto& inner : vec) {
            save_vector(file, inner);
        }
    }

    // Reads the format produced by save_vector2d().
    void load_vector2d(std::ifstream& file, std::vector<std::vector<double>>& vec) {
        size_t outer_size = 0;
        if (!file.read(reinterpret_cast<char*>(&outer_size), sizeof(outer_size))) {
            vec.clear();
            return;
        }
        vec.resize(outer_size);
        for (auto& inner : vec) {
            load_vector(file, inner);
        }
    }

};
