import numpy as np
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from pycuda.elementwise import ElementwiseKernel
from pycuda.compiler import SourceModule

class GPUOperations:
    """GPU-accelerated linear algebra and activations built on PyCUDA.

    Exposes a ``numpy.dot``-style ``dot`` (1-D vector dot and 2-D matrix
    multiply) plus elementwise sigmoid/ReLU activations and their
    derivatives.  All computation is done in float32; host inputs are
    uploaded (and converted) automatically.
    """

    # Threads per block edge for the 2-D matmul launch (16x16 = 256 threads).
    _BLOCK_SIZE = 16

    def __init__(self):
        self._init_correct_kernels()

    def _init_correct_kernels(self):
        """Compile the matmul kernel and build the elementwise kernels."""
        # Naive one-thread-per-output-element matrix multiply:
        # C[M, N] = A[M, K] @ B[K, N], all buffers row-major float32.
        self.matmul_kernel_source = SourceModule("""
            __global__ void matmul(float *A, float *B, float *C, int M, int N, int K) {
                int row = blockIdx.y * blockDim.y + threadIdx.y;
                int col = blockIdx.x * blockDim.x + threadIdx.x;
                
                if (row < M && col < N) {
                    float sum = 0.0f;
                    for (int k = 0; k < K; k++) {
                        sum += A[row * K + k] * B[k * N + col];
                    }
                    C[row * N + col] = sum;
                }
            }
        """)
        self.matmul_kernel = self.matmul_kernel_source.get_function("matmul")

        # Sigmoid activation: y = 1 / (1 + exp(-x)).
        self.sigmoid_kernel = ElementwiseKernel(
            "float *x, float *y",
            "y[i] = 1.0f / (1.0f + expf(-x[i]))",
            "sigmoid"
        )

        # Sigmoid derivative; NOTE: expects x to already BE sigmoid output.
        self.sigmoid_derivative_kernel = ElementwiseKernel(
            "float *x, float *y",
            "y[i] = x[i] * (1.0f - x[i])",
            "sigmoid_derivative"
        )

        # ReLU activation: y = max(0, x).
        self.relu_kernel = ElementwiseKernel(
            "float *x, float *y",
            "y[i] = fmaxf(0.0f, x[i])",
            "relu"
        )

        # ReLU derivative: 1 where x > 0, else 0.
        self.relu_derivative_kernel = ElementwiseKernel(
            "float *x, float *y",
            "y[i] = (x[i] > 0.0f) ? 1.0f : 0.0f",
            "relu_derivative"
        )

    @staticmethod
    def _as_float32_gpu(x):
        """Return *x* as a C-contiguous float32 GPUArray.

        Host data is uploaded; non-float32 device arrays are converted.
        The kernels index row-major float32 buffers, so both the dtype
        and the memory layout must be enforced here (np.asarray alone
        would let a transposed/strided view through with wrong layout).
        """
        if isinstance(x, gpuarray.GPUArray):
            return x if x.dtype == np.float32 else x.astype(np.float32)
        return gpuarray.to_gpu(np.ascontiguousarray(x, dtype=np.float32))

    def dot(self, a, b):
        """Dot product mimicking ``numpy.dot`` for 1-D and 2-D inputs.

        Args:
            a, b: numpy arrays (or array-likes) or GPUArrays; uploaded
                and converted to float32 as needed.

        Returns:
            GPUArray: a 0-d scalar for vector·vector, or an (M, N)
            float32 matrix for 2-D × 2-D inputs.

        Raises:
            ValueError: on shape mismatch or unsupported dimensionality.
        """
        a = self._as_float32_gpu(a)
        b = self._as_float32_gpu(b)

        # Vector dot product.
        if a.ndim == 1 and b.ndim == 1:
            if a.shape[0] != b.shape[0]:
                raise ValueError(f"向量长度不匹配: {a.shape[0]} vs {b.shape[0]}")
            return gpuarray.dot(a, b)

        # Matrix multiplication.
        if a.ndim == 2 and b.ndim == 2:
            if a.shape[1] != b.shape[0]:
                raise ValueError(f"矩阵维度不匹配: {a.shape} 和 {b.shape}")
            return self._matmul_2d(a, b)

        raise ValueError(f"不支持的维度组合: a{a.shape}, b{b.shape}")

    def _matmul_2d(self, a_gpu, b_gpu):
        """Launch the matmul kernel for two float32 GPUArrays; return C."""
        m, k = a_gpu.shape
        _, n = b_gpu.shape
        c_gpu = gpuarray.zeros((m, n), dtype=np.float32)

        # Ceil-divide so partial edge tiles are covered; the kernel's
        # bounds check discards the out-of-range threads.
        bs = self._BLOCK_SIZE
        grid = ((n + bs - 1) // bs, (m + bs - 1) // bs)

        self.matmul_kernel(a_gpu, b_gpu, c_gpu,
                           np.int32(m), np.int32(n), np.int32(k),
                           block=(bs, bs, 1),
                           grid=grid)
        return c_gpu

    def gpu_sigmoid(self, x):
        """Elementwise sigmoid; returns a new float32 GPUArray."""
        x_gpu = self._as_float32_gpu(x)
        result = gpuarray.empty_like(x_gpu)
        self.sigmoid_kernel(x_gpu, result)
        return result

    def gpu_sigmoid_derivative(self, x):
        """Elementwise x * (1 - x); *x* should already be sigmoid output."""
        x_gpu = self._as_float32_gpu(x)
        result = gpuarray.empty_like(x_gpu)
        self.sigmoid_derivative_kernel(x_gpu, result)
        return result

    def gpu_relu(self, x):
        """Elementwise ReLU max(0, x); returns a new float32 GPUArray."""
        x_gpu = self._as_float32_gpu(x)
        result = gpuarray.empty_like(x_gpu)
        self.relu_kernel(x_gpu, result)
        return result

    def gpu_relu_derivative(self, x):
        """Elementwise ReLU derivative (1 if x > 0 else 0)."""
        x_gpu = self._as_float32_gpu(x)
        result = gpuarray.empty_like(x_gpu)
        self.relu_derivative_kernel(x_gpu, result)
        return result

def test_operations():
    """Smoke-test GPUOperations against numpy reference results."""
    gpu_ops = GPUOperations()

    print("测试修复后的GPU操作...")

    # --- 1-D vector dot product vs numpy ---
    print("1. 测试向量点积")
    a = np.array([1, 2, 3], dtype=np.float32)
    b = np.array([4, 5, 6], dtype=np.float32)

    cpu_dot = np.dot(a, b)
    gpu_dot = gpu_ops.dot(a, b).get()
    print(f"CPU点积: {cpu_dot}, GPU点积: {gpu_dot}")
    print(f"向量点积一致: {np.allclose(cpu_dot, gpu_dot)}")

    # --- 2-D matrix multiply vs numpy ---
    print("\n2. 测试矩阵乘法")
    np.random.seed(42)  # fixed seed so CPU/GPU inputs match run to run
    A = np.random.randn(3, 4).astype(np.float32)
    B = np.random.randn(4, 5).astype(np.float32)

    cpu_matmul = np.dot(A, B)
    gpu_matmul = gpu_ops.dot(A, B).get()

    print(f"CPU结果形状: {cpu_matmul.shape}")
    print(f"GPU结果形状: {gpu_matmul.shape}")

    # Element-by-element comparison of the two results.
    print("\n详细比较:")
    print("CPU结果:")
    print(cpu_matmul)
    print("GPU结果:")
    print(gpu_matmul)

    diff = np.abs(cpu_matmul - gpu_matmul)
    print(f"\n最大差异: {np.max(diff)}")
    print(f"平均差异: {np.mean(diff)}")
    print(f"结果是否接近: {np.allclose(cpu_matmul, gpu_matmul, rtol=1e-4, atol=1e-5)}")

# Run the GPU smoke test when executed as a script.
if __name__ == "__main__":
    test_operations()