#include <vector>
#include <iostream>
#include <thread>
#include <cmath>
#include "utils.h"

// Expand class labels into a one-hot matrix.
// `labels` holds label_size class ids (stored as floats); `onehot` receives a
// row-major (label_size, label_nums) matrix with at most a single 1 per row.
// Rows whose label is fractional or outside [0, label_nums) stay all-zero,
// matching the original element-wise float comparison.
void label_to_onehot(int label_nums, float *onehot, float *labels, int label_size){
    for (int i = 0; i < label_size; i++){
        float *row = &onehot[i * label_nums];
        for (int j = 0; j < label_nums; j++){
            row[j] = 0;
        }
        int lab = (int)labels[i];
        // Set the hot entry only when the label is an exact in-range integer;
        // this replaces one float==int comparison per cell with a single check.
        if (lab >= 0 && lab < label_nums && (float)lab == labels[i]){
            row[lab] = 1;
        }
    }
}

// Numerically stable softmax: output[i] = exp(input[i]) / sum_j exp(input[j]).
// Subtracting the row maximum before exponentiating prevents float overflow
// (exp(x) overflows for x > ~88), which previously produced inf/inf = NaN.
// Also stores each exponential once instead of calling exp() twice per element.
void softmax_kernel(float *input, unsigned int size, float *output) {
    if (size == 0) return;

    float max_val = input[0];
    for (unsigned int i = 1; i < size; i++) {
        if (input[i] > max_val) max_val = input[i];
    }

    float sum = 0.0f;
    for (unsigned int i = 0; i < size; i++) {
        output[i] = std::exp(input[i] - max_val);
        sum += output[i];
    }

    for (unsigned int i = 0; i < size; i++) {
        output[i] /= sum;
    }
}

// Per-thread work descriptor shared by the softmax / cross-entropy workers.
// (The stray uninitialized global pointer `*s` previously declared here was
// unused — every function takes its own SoftmaxArgs* — and has been removed.)
struct SoftmaxArgs {
    int start;     // first row index (inclusive)
    int end;       // last row index (exclusive)
    float *input;  // row-major input matrix
    int size;      // number of columns per row
    float *output; // destination buffer (per-row for softmax, per-sample for celoss)
    float *temp;   // extra per-row buffer (one-hot labels for celoss); may be NULL
};

void softmax_thread_function(SoftmaxArgs *s) {
    for (int i = s->start; i < s->end; i++) {
        int sw = i * s->size; 
        float *inptr = &s->input[sw];
        float *outptr = &s->output[sw];
        softmax_kernel(inptr, s->size, outptr);
    }
}

// Row-wise softmax over a row-major (x_size, y_size) matrix, parallelised
// across hardware threads; results are written to `output` (same shape).
void softmax(float *input, int x_size, int y_size, float *output) {
    if (x_size <= 0) return;

    // hardware_concurrency() is allowed to return 0 when the value cannot be
    // determined; the old code then divided by zero. Fall back to one thread,
    // and never spawn more threads than there are rows.
    int thread_num = (int)std::thread::hardware_concurrency();
    if (thread_num < 1) thread_num = 1;
    if (thread_num > x_size) thread_num = x_size;

    std::vector<std::thread> threads(thread_num);
    std::vector<SoftmaxArgs> mes(thread_num);

    // Split the rows into contiguous chunks, spreading the remainder over the
    // first chunks so every thread gets within one row of the same load.
    int base = x_size / thread_num;
    int remainder = x_size % thread_num;
    int start = 0;
    for (int i = 0; i < thread_num; i++) {
        int len = base + (i < remainder ? 1 : 0);
        mes[i] = SoftmaxArgs{start, start + len, input, y_size, output, NULL};
        start += len;
    }

    for (int i = 0; i < thread_num; i++) {
        threads[i] = std::thread(softmax_thread_function, &mes[i]);
    }
    for (int i = 0; i < thread_num; i++) {
        threads[i].join();
    }
}

// Cross-entropy for one sample: sum_i (-log(p_i) * y_i), where `input` holds
// predicted probabilities and `true_labels` the one-hot target vector.
float ce_loss_kernel(float *input, float *true_labels, int label_nums){
    float total = 0.0f;
    for (int idx = 0; idx < label_nums; idx++){
        total -= true_labels[idx] * log(input[idx]);
    }
    return total;
}

void celoss_thread_function(SoftmaxArgs *s) {
    for (int i = s->start; i < s->end; i++){
        int sw = i * s->size; 
        float *inptr = &s->input[sw];
        float *true_lable = &s->temp[sw];
        s->output[i] = ce_loss_kernel(inptr, true_lable, s->size);
    }  
}

// Per-sample cross-entropy loss, parallelised across hardware threads.
// `input` is a row-major (label_size, label_nums) probability matrix,
// `labels` the matching one-hot matrix; `output` receives label_size losses.
void celoss(float *input, float *labels, int label_nums, int label_size, float *output) {
    if (label_size <= 0) return;

    // hardware_concurrency() is allowed to return 0 when the value cannot be
    // determined; the old code then divided by zero. Fall back to one thread,
    // and never spawn more threads than there are samples.
    int thread_num = (int)std::thread::hardware_concurrency();
    if (thread_num < 1) thread_num = 1;
    if (thread_num > label_size) thread_num = label_size;

    std::vector<std::thread> threads(thread_num);
    std::vector<SoftmaxArgs> mes(thread_num);

    // Split the samples into contiguous chunks, spreading the remainder over
    // the first chunks so every thread gets within one sample of the same load.
    int base = label_size / thread_num;
    int remainder = label_size % thread_num;
    int start = 0;
    for (int i = 0; i < thread_num; i++) {
        int len = base + (i < remainder ? 1 : 0);
        mes[i] = SoftmaxArgs{start, start + len, input, label_nums, output, labels};
        start += len;
    }

    for (int i = 0; i < thread_num; i++) {
        threads[i] = std::thread(celoss_thread_function, &mes[i]);
    }
    for (int i = 0; i < thread_num; i++) {
        threads[i].join();
    }
}

// The inputs to the two functions below must already be softmax outputs.
void nllloss(float *input, float *labels, int label_nums, int label_size, float *output) {
    int size = label_nums * label_size;
    float onehot[size];
    label_to_onehot(label_nums, onehot, labels, label_size);
    celoss(input, onehot, label_nums, label_size, output);
}

void nllloss_backward(float *input, float *labels, int label_nums, int label_size, float *output) {
    int size = label_nums * label_size;
    float onehot[size];
    label_to_onehot(label_nums, onehot, labels, label_size);

    subarray(input, size, onehot, size, 0, output);
}