#include <iostream>
#include <cmath>
#include <vector>
#include <thread>
#include "sigmoid.h"
#include "shape.h"
#include "tensor.h"


// Scalar logistic function: sigmoid(x) = 1 / (1 + e^(-x)).
// `auto` keeps the intermediate at exactly the type the unqualified
// exp() call produces, so rounding matches the original expression.
float fun_sigmoid(float x){
    const auto denom = 1 + exp(-x);
    return 1 / denom;
}

// Per-thread work descriptor: a worker processes indices [start, end)
// of input buffer `a`, writing into output buffer `b`. `c` is an
// optional auxiliary input (the upstream gradient in the backward
// pass; NULL in the forward pass).
typedef struct{
    int start;   // first index to process (inclusive)
    int end;     // one-past-last index (exclusive)
    float *a;    // input buffer
    float *b;    // output buffer
    float *c;    // auxiliary input buffer, may be NULL
}Tos;

// Thread worker for the forward pass: element-wise sigmoid over the
// descriptor's [start, end) slice.
void sig_t(Tos *p){
    float *in  = p->a;
    float *out = p->b;
    for (int idx = p->start; idx < p->end; ++idx) {
        out[idx] = 1 / (1 + exp(-in[idx]));
    }
}

// Element-wise sigmoid over a Tensor, parallelized across hardware
// threads. Returns a new Tensor (fresh data buffer, copied shape, same
// device tag); the input is not modified.
//
// Fixes vs. the previous version:
//  - std::thread::hardware_concurrency() may return 0, which made
//    `size % thread_num` divide by zero (UB); clamp to at least 1.
//  - The three separate partitioning branches (exact division,
//    remainder, size < thread_num) are replaced by one even partition:
//    the first `size % thread_num` blocks get one extra element, so no
//    thread ever receives an empty or out-of-range slice.
Tensor *sigmoid(Tensor *x){
    int size = data_size(x);
    float *new_data = new float[size];   // ownership passes to the returned Tensor

    int thread_num = (int)std::thread::hardware_concurrency();
    if (thread_num < 1) thread_num = 1;       // hardware_concurrency() may report 0
    if (thread_num > size) thread_num = size; // never more threads than elements

    std::vector<std::thread> threads;
    threads.reserve(thread_num);
    std::vector<Tos> mes(thread_num);

    // Even partition of [0, size): blocks of `block` elements, with the
    // first `rem` blocks taking one extra element to absorb the remainder.
    int block = (thread_num > 0) ? size / thread_num : 0;
    int rem   = (thread_num > 0) ? size % thread_num : 0;
    int start = 0;
    for (int i = 0; i < thread_num; i++) {
        int len = block + (i < rem ? 1 : 0);
        mes[i] = Tos{start, start + len, x->data, new_data, NULL};
        start += len;
        threads.emplace_back(sig_t, &mes[i]);
    }
    for (int i = 0; i < (int)threads.size(); i++) {
        threads[i].join();
    }

    Shape *new_shape = shapeCopy(x->shape);
    return new Tensor(new_shape, new_data, x->device);
}

Tensor *sig_back_t(Tos *p){
    for (int i  =p->start; i < p->end; i++){
        p->b[i] = fun_sigmoid(p->a[i]) * (1 - fun_sigmoid(p->a[i])) * p->c[i];
    }
}

// Backward pass of sigmoid: returns a new Tensor holding
// grad * sigmoid'(x) = grad * s * (1 - s), parallelized across hardware
// threads. `grad` is the upstream gradient; `x` is the forward input.
// Assumes grad and x have the same element count — TODO confirm at call sites.
//
// Fixes vs. the previous version:
//  - CRITICAL: the threads were launched with the forward worker `sig_t`
//    instead of `sig_back_t`, so the result was sigmoid(x) and the
//    upstream gradient was ignored entirely.
//  - std::thread::hardware_concurrency() may return 0, which made
//    `size % thread_num` divide by zero (UB); clamp to at least 1.
//  - The three partitioning branches are replaced by one even partition
//    (first `rem` blocks take one extra element), matching sigmoid().
Tensor *sig_backward(Tensor *grad, Tensor *x){
    int size = data_size(x);
    float *new_data = new float[size];   // ownership passes to the returned Tensor

    int thread_num = (int)std::thread::hardware_concurrency();
    if (thread_num < 1) thread_num = 1;       // hardware_concurrency() may report 0
    if (thread_num > size) thread_num = size; // never more threads than elements

    std::vector<std::thread> threads;
    threads.reserve(thread_num);
    std::vector<Tos> mes(thread_num);

    // Even partition of [0, size): the first `rem` blocks absorb the remainder.
    int block = (thread_num > 0) ? size / thread_num : 0;
    int rem   = (thread_num > 0) ? size % thread_num : 0;
    int start = 0;
    for (int i = 0; i < thread_num; i++) {
        int len = block + (i < rem ? 1 : 0);
        mes[i] = Tos{start, start + len, x->data, new_data, grad->data};
        start += len;
        threads.emplace_back(sig_back_t, &mes[i]);  // backward worker, not sig_t
    }
    for (int i = 0; i < (int)threads.size(); i++) {
        threads[i].join();
    }

    Shape *new_shape = shapeCopy(x->shape);
    return new Tensor(new_shape, new_data, x->device);
}