#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>

#define MIN_SIZE 128
#define MAX_SIZE 2048
#define MAX_PRINT 4

// Dense row-major matrix. `data` holds rows*cols doubles; ownership of the
// buffer belongs to whoever called generate_matrix()/malloc'd it.
typedef struct {
    int rows;      // number of rows
    int cols;      // number of columns (row stride of `data`)
    double *data;  // row-major element storage, length rows*cols
} Matrix;

// 创建矩阵信息的MPI派生数据类型
MPI_Datatype create_matrix_info_type() {
    MPI_Datatype matrix_info_type;
    int blocklengths[3] = {1, 1, 1};
    MPI_Datatype types[3] = {MPI_INT, MPI_INT, MPI_DOUBLE};
    MPI_Aint displacements[3];
    
    Matrix dummy;
    MPI_Get_address(&dummy.rows, &displacements[0]);
    MPI_Get_address(&dummy.cols, &displacements[1]);
    MPI_Get_address(&dummy.data, &displacements[2]);
    
    // 计算相对位移
    displacements[2] -= displacements[0];
    displacements[1] -= displacements[0];
    displacements[0] = 0;
    
    MPI_Type_create_struct(3, blocklengths, displacements, types, &matrix_info_type);
    MPI_Type_commit(&matrix_info_type);
    return matrix_info_type;
}

// Allocate mat->data (rows*cols doubles) and fill it with uniform pseudo-random
// values in [0.0, 10.0]. Caller must have set mat->rows and mat->cols, and is
// responsible for freeing mat->data. Aborts the MPI job on allocation failure
// instead of dereferencing a NULL pointer.
void generate_matrix(Matrix *mat) {
    size_t count = (size_t)mat->rows * (size_t)mat->cols;  // avoid int overflow
    mat->data = malloc(count * sizeof *mat->data);
    if (mat->data == NULL) {
        fprintf(stderr, "Failed to allocate %d x %d matrix\n",
                mat->rows, mat->cols);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    for (size_t i = 0; i < count; i++) {
        mat->data[i] = (double)rand() / RAND_MAX * 10.0;
    }
}

// Print the top-left corner of the matrix, at most MAX_PRINT x MAX_PRINT
// elements, one row per line in "%6.2f " format.
void print_matrix_part(Matrix *mat) {
    int nr = mat->rows;
    int nc = mat->cols;
    if (nr > MAX_PRINT) nr = MAX_PRINT;
    if (nc > MAX_PRINT) nc = MAX_PRINT;

    for (int r = 0; r < nr; r++) {
        const double *row = mat->data + (size_t)r * mat->cols;
        for (int c = 0; c < nc; c++) {
            printf("%6.2f ", row[c]);
        }
        printf("\n");
    }
}

// Compute C[i][j] = sum_k A[i][k] * B[k][j] for rows i in [start_row, end_row).
// Requires A->cols == B->rows and C->cols == B->cols; C->data must be large
// enough for end_row rows.
//
// Fixes: index C with its own column count (the original used B->cols, which
// only happened to work because C->cols == B->cols by construction), and
// accumulate into a local scalar instead of re-indexing C->data in the inner
// loop. The summation order is unchanged, so results are bit-identical.
void matrix_multiply(Matrix *A, Matrix *B, Matrix *C, int start_row, int end_row) {
    for (int i = start_row; i < end_row; i++) {
        for (int j = 0; j < B->cols; j++) {
            double sum = 0.0;
            for (int k = 0; k < A->cols; k++) {
                sum += A->data[i * A->cols + k] * B->data[k * B->cols + j];
            }
            C->data[i * C->cols + j] = sum;
        }
    }
}

// Distributed matrix product C = A * B.
// Rank 0 generates A (m x n) and B (n x k); B is broadcast in full, A is
// scattered by rows, each rank multiplies its slice, and the result rows are
// gathered back into C on rank 0.
//
// Fixes relative to the original:
//  - Non-root ranks previously malloc'd B.data, then had that pointer
//    clobbered by the struct broadcast and malloc'd again — leaking the
//    first buffer. B.data is now allocated exactly once, after the broadcast.
//  - Scatterv counts/displacements (row length n) were reused for the
//    Gatherv of C (row length k), which is wrong whenever n != k. Separate
//    recvcounts/rdispls scaled by k are now used for the gather.
int main(int argc, char *argv[]) {
    int rank, size;
    int m, n, k;
    Matrix A, B, C;
    double start_time, end_time;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Derived datatype carrying the matrix dimensions.
    MPI_Datatype matrix_info_type = create_matrix_info_type();

    if (rank == 0) {
        // Seed the generator on the root only; workers never call rand().
        srand(time(NULL));

        m = 2048; n = 2048; k = 2048; // defaults; real sizes should come from input

        if (m < MIN_SIZE || m > MAX_SIZE ||
            n < MIN_SIZE || n > MAX_SIZE ||
            k < MIN_SIZE || k > MAX_SIZE) {
            printf("Matrix size out of range [%d, %d]\n", MIN_SIZE, MAX_SIZE);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        // Generate input matrices A (m x n) and B (n x k).
        A.rows = m; A.cols = n;
        B.rows = n; B.cols = k;
        generate_matrix(&A);
        generate_matrix(&B);

        printf("Matrix A (partial):\n");
        print_matrix_part(&A);
        printf("\nMatrix B (partial):\n");
        print_matrix_part(&B);
    }

    // Broadcast the problem dimensions.
    MPI_Bcast(&m, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&k, 1, MPI_INT, 0, MPI_COMM_WORLD);

    // Broadcast B's metadata via the derived type (collective: every rank
    // makes the identical call), then allocate B's storage on the workers.
    // Allocating AFTER the broadcast avoids leaking a buffer whose pointer
    // the broadcast would otherwise overwrite.
    MPI_Bcast(&B, 1, matrix_info_type, 0, MPI_COMM_WORLD);
    if (rank != 0) {
        B.data = (double *)malloc((size_t)B.rows * B.cols * sizeof(double));
        if (B.data == NULL) {
            fprintf(stderr, "rank %d: failed to allocate B\n", rank);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
    }

    // Broadcast B's contents.
    MPI_Bcast(B.data, B.rows * B.cols, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    // Row partition: the first `remainder` ranks get one extra row.
    int rows_per_proc = m / size;
    int remainder = m % size;
    int local_rows = rows_per_proc + (rank < remainder ? 1 : 0);

    // Scatterv/Gatherv parameters (significant on the root only).
    // A's rows have n elements; C's rows have k elements, so the scatter and
    // gather need distinct counts/displacements.
    int *sendcounts = NULL, *sdispls = NULL;
    int *recvcounts = NULL, *rdispls = NULL;

    if (rank == 0) {
        sendcounts = (int *)malloc(size * sizeof(int));
        sdispls    = (int *)malloc(size * sizeof(int));
        recvcounts = (int *)malloc(size * sizeof(int));
        rdispls    = (int *)malloc(size * sizeof(int));

        int row_offset = 0;
        for (int i = 0; i < size; i++) {
            int rows_i = rows_per_proc + (i < remainder ? 1 : 0);
            sendcounts[i] = rows_i * n;
            sdispls[i]    = row_offset * n;
            recvcounts[i] = rows_i * k;
            rdispls[i]    = row_offset * k;
            row_offset += rows_i;
        }

        // Result matrix C (m x k), filled by the gather below.
        C.rows = m; C.cols = k;
        C.data = (double *)malloc((size_t)m * k * sizeof(double));
        if (C.data == NULL) {
            fprintf(stderr, "rank 0: failed to allocate C\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
    }

    // Local slices of A and C.
    Matrix local_A, local_C;
    local_A.rows = local_rows;
    local_A.cols = n;
    local_A.data = (double *)malloc((size_t)local_rows * n * sizeof(double));

    local_C.rows = local_rows;
    local_C.cols = k;
    local_C.data = (double *)calloc((size_t)local_rows * k, sizeof(double));

    if (local_A.data == NULL || local_C.data == NULL) {
        fprintf(stderr, "rank %d: failed to allocate local buffers\n", rank);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    // Distribute A's rows.
    MPI_Scatterv(rank == 0 ? A.data : NULL, sendcounts, sdispls, MPI_DOUBLE,
                local_A.data, local_rows * n, MPI_DOUBLE,
                0, MPI_COMM_WORLD);

    // Time only the computation, not the communication.
    MPI_Barrier(MPI_COMM_WORLD);
    start_time = MPI_Wtime();

    matrix_multiply(&local_A, &B, &local_C, 0, local_C.rows);

    end_time = MPI_Wtime();

    // Collect result rows into C on the root (note k-scaled counts).
    MPI_Gatherv(local_C.data, local_rows * k, MPI_DOUBLE,
               rank == 0 ? C.data : NULL, recvcounts, rdispls, MPI_DOUBLE,
               0, MPI_COMM_WORLD);

    if (rank == 0) {
        printf("\nResult Matrix C (partial):\n");
        print_matrix_part(&C);
        printf("\nMatrix multiplication time: %.6f seconds\n", end_time - start_time);
    }

    // Release resources.
    free(local_A.data);
    free(local_C.data);
    free(B.data);

    if (rank == 0) {
        free(A.data);
        free(C.data);
        free(sendcounts);
        free(sdispls);
        free(recvcounts);
        free(rdispls);
    }

    MPI_Type_free(&matrix_info_type);
    MPI_Finalize();
    return 0;
}