use crate::{
    math::Matrix4f,
    pool::Handle,
    function::{
        render::{canvas_pipeline::CanvasQuadVertex, loader::ShaderLoader, rhi::Rhi},
        resource::{
            resource_system::AliceResource,
            texture::Texture,
        },
    },
};

use egui::epaint::ahash::{HashMap, HashMapExt};
use wgpu::{util::DeviceExt, CommandEncoder, TextureView};

// NOTE(review): mutable global state — every access requires `unsafe` and is
// not thread-safe; it is never referenced in this file. Consider replacing
// with `std::sync::atomic::AtomicI32` and the conventional
// SCREAMING_SNAKE_CASE name (`COUNT`) — TODO confirm there are no external
// users before renaming or removing.
pub static mut count: i32 = 0;

/// Tag distinguishing the two kinds of GPU buffers this canvas module
/// manages.
/// NOTE(review): not referenced anywhere in this file — presumably used by
/// callers elsewhere; verify before removing.
pub enum BufferType {
    /// A vertex buffer.
    Vertex,
    /// An index buffer.
    Index,
}

/// A GPU-resident mesh: one vertex buffer plus one index buffer, with their
/// element counts.
#[derive(Debug)]
pub struct WebgpuMesh {
    /// Number of vertices stored in `vertex_buffer`.
    pub vertex_count: usize,
    pub vertex_buffer: wgpu::Buffer,
    /// Number of indices stored in `index_buffer`.
    /// NOTE(review): field name has a typo ("indics" → "indices"); it is
    /// public, so renaming would break external users.
    pub indics_count: usize,
    pub index_buffer: wgpu::Buffer,
}

impl WebgpuMesh {
    /// Per-vertex buffer layout for [`CanvasQuadVertex`] data.
    ///
    /// Attribute map (must match the vertex inputs in `canvas.wgsl`):
    /// * location 0 — `Float32x4` at byte offset 0,
    /// * location 1 — `Float32x2` at byte offset 16 (after 4 `f32`s),
    /// * location 2 — `Uint32`    at byte offset 24 (after 6 `f32`s).
    pub fn get_vertex_layout<'a>() -> wgpu::VertexBufferLayout<'a> {
        wgpu::VertexBufferLayout {
            array_stride: std::mem::size_of::<CanvasQuadVertex>() as wgpu::BufferAddress,
            step_mode: wgpu::VertexStepMode::Vertex,
            attributes: &[
                wgpu::VertexAttribute {
                    offset: 0 as wgpu::BufferAddress,
                    shader_location: 0,
                    format: wgpu::VertexFormat::Float32x4,
                },
                wgpu::VertexAttribute {
                    offset: (std::mem::size_of::<f32>() * 4) as wgpu::BufferAddress,
                    shader_location: 1,
                    format: wgpu::VertexFormat::Float32x2,
                },
                wgpu::VertexAttribute {
                    offset: (std::mem::size_of::<f32>() * 6) as wgpu::BufferAddress,
                    shader_location: 2,
                    format: wgpu::VertexFormat::Uint32,
                },
            ],
        }
    }

    /// Per-instance buffer layout carrying one [`Matrix4f`] model transform
    /// per instance.
    ///
    /// A `mat4` occupies four `vec4` attribute slots, so the four columns are
    /// exposed at shader locations 3–6 (locations 0–2 are taken by the
    /// per-vertex layout above) and reassembled into a matrix in the shader.
    pub fn get_instance_vertex_layout<'a>() -> wgpu::VertexBufferLayout<'a> {
        wgpu::VertexBufferLayout {
            array_stride: std::mem::size_of::<Matrix4f>() as wgpu::BufferAddress,
            // Instance step mode: the attribute advances once per instance,
            // not once per vertex.
            step_mode: wgpu::VertexStepMode::Instance,
            attributes: &[
                wgpu::VertexAttribute {
                    offset: 0,
                    // First matrix column; starts after the per-vertex
                    // attributes (locations 0–2).
                    shader_location: 3,
                    format: wgpu::VertexFormat::Float32x4,
                },
                wgpu::VertexAttribute {
                    offset: std::mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
                    shader_location: 4,
                    format: wgpu::VertexFormat::Float32x4,
                },
                wgpu::VertexAttribute {
                    offset: std::mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
                    shader_location: 5,
                    format: wgpu::VertexFormat::Float32x4,
                },
                wgpu::VertexAttribute {
                    offset: std::mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
                    shader_location: 6,
                    format: wgpu::VertexFormat::Float32x4,
                },
            ],
        }
    }

    /// Backward-compatible alias for [`Self::get_instance_vertex_layout`].
    /// Kept because the misspelled name ("instace") may have external
    /// callers; prefer the correctly-spelled method in new code.
    pub fn get_instace_vertex_layout<'a>() -> wgpu::VertexBufferLayout<'a> {
        Self::get_instance_vertex_layout()
    }
}

/// Scene-global GPU state for the canvas pass: the global uniform buffer
/// (holding the camera transform — see `CanvasPass::update_scene`) and the
/// bind group that exposes it at group 0.
pub struct CanvasScene {
    pub global_buffer: wgpu::Buffer,
    pub global_group: wgpu::BindGroup,
}

impl CanvasScene {
    /// Creates the bind-group layout for the scene's global uniform data:
    /// a single uniform buffer at binding 0, visible to both the vertex and
    /// fragment stages.
    pub fn get_bind_group_layout(rhi: Rhi) -> wgpu::BindGroupLayout {
        rhi.borrow()
            .device
            .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                entries: &[wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                }],
                label: Some("canvas scene global data layout"),
            })
    }
}

/// A GPU buffer paired with the size it was last (re)allocated for, used to
/// decide between an in-place write and a re-creation on update.
#[derive(Debug)]
pub struct SizedBuffer {
    buffer: wgpu::Buffer,
    // Unit differs by use: byte length for vertex buffers
    // (`update_vertex_buffer`), index count for index buffers
    // (`update_index_buffer`).
    size: usize,
}

/// Render pass that draws textured canvas quads, one indexed batch per
/// texture handle.
pub struct CanvasPass {
    graphic_pipeline: wgpu::RenderPipeline,
    // Parallel per-batch buffers: batch `i` draws with `vertex_buffers[i]`
    // and `index_buffers[i]` (see `draw`).
    pub index_buffers: Vec<SizedBuffer>,
    pub vertex_buffers: Vec<SizedBuffer>,
    canvas_scene: CanvasScene,
    // Layout for group 1: texture view (binding 0) + sampler (binding 1).
    canvas_batch_group_layout: wgpu::BindGroupLayout,
    // Cached texture+sampler bind groups, keyed by texture handle.
    canvas_batch_group: HashMap<Handle<Texture>, wgpu::BindGroup>,
}

impl CanvasPass {
    /// Builds the canvas render pipeline.
    ///
    /// Loads `canvas.wgsl`, creates the global (camera) uniform buffer and
    /// its bind group (group 0), the per-texture bind-group layout (group 1:
    /// texture + filtering sampler, fragment-only), and a triangle-list
    /// pipeline with source-over alpha blending and no depth buffer.
    pub fn new(rhi: Rhi) -> Self {
        let shader = include_str!("canvas.wgsl");

        let shader = ShaderLoader::load(&rhi.borrow().device, shader);

        let mut group_layouts = Vec::with_capacity(2);

        // Camera starts as the identity transform, converted into wgpu's
        // clip-space conventions.
        let transform = Matrix4f::webgl_to_wgpu(Matrix4f::identity());

        let global_buffer =
            rhi.borrow()
                .device
                .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                    label: Some("Camera Buffer"),
                    contents: bytemuck::cast_slice(&[transform]),
                    usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
                });

        let global_layout = CanvasScene::get_bind_group_layout(rhi.clone());

        let global_group = rhi
            .borrow()
            .device
            .create_bind_group(&wgpu::BindGroupDescriptor {
                layout: &global_layout,
                entries: &[wgpu::BindGroupEntry {
                    binding: 0,
                    resource: global_buffer.as_entire_binding(),
                }],
                label: Some("main camera "),
            });

        let canvas_scene = CanvasScene {
            global_buffer,
            global_group,
        };

        // Group 1: one 2D float texture plus one filtering sampler, both
        // visible to the fragment stage only.
        let canvas_batch_group_layout =
            rhi.borrow()
                .device
                .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                    label: Some("canvas_batch_group_layout"),
                    entries: &[
                        wgpu::BindGroupLayoutEntry {
                            binding: 0,
                            visibility: wgpu::ShaderStages::FRAGMENT,
                            ty: wgpu::BindingType::Texture {
                                multisampled: false,
                                sample_type: wgpu::TextureSampleType::Float { filterable: true },
                                view_dimension: wgpu::TextureViewDimension::D2,
                            },
                            count: None,
                        },
                        wgpu::BindGroupLayoutEntry {
                            binding: 1,
                            visibility: wgpu::ShaderStages::FRAGMENT,
                            ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                            count: None,
                        },
                    ],
                });

        group_layouts.push(&global_layout);
        group_layouts.push(&canvas_batch_group_layout);

        let layout = rhi
            .borrow()
            .device
            .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("Main Camera Pass Layout"),
                bind_group_layouts: group_layouts.as_slice(),
                push_constant_ranges: &[],
            });

        let primitive = wgpu::PrimitiveState {
            topology: wgpu::PrimitiveTopology::TriangleList,
            strip_index_format: None,
            front_face: wgpu::FrontFace::Ccw,
            // No back-face culling.
            cull_mode: None,
            // Anything other than Fill requires Features::POLYGON_MODE_LINE
            // or Features::POLYGON_MODE_POINT.
            polygon_mode: wgpu::PolygonMode::Fill,
            // Requires Features::DEPTH_CLIP_CONTROL.
            unclipped_depth: false,
            // Requires Features::CONSERVATIVE_RASTERIZATION.
            conservative: false,
        };

        let multisample = wgpu::MultisampleState {
            count: 1,
            mask: !0,
            alpha_to_coverage_enabled: false,
        };
        let multiview = None;

        let vertex = wgpu::VertexState {
            module: &shader,
            entry_point: "vs_main",
            buffers: &[WebgpuMesh::get_vertex_layout()],
        };

        let fragment = wgpu::FragmentState {
            module: &shader,
            entry_point: "fs_main",
            targets: &[Some(wgpu::ColorTargetState {
                // Render into whatever format the surface was configured with.
                format: rhi.borrow().config.format,
                // Standard source-over alpha blending
                // (src * alpha + dst * (1 - alpha)) for both channels.
                blend: Some(wgpu::BlendState {
                    color: wgpu::BlendComponent {
                        src_factor: wgpu::BlendFactor::SrcAlpha,
                        dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                        operation: wgpu::BlendOperation::Add,
                    },
                    alpha: wgpu::BlendComponent {
                        src_factor: wgpu::BlendFactor::SrcAlpha,
                        dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                        operation: wgpu::BlendOperation::Add,
                    },
                }),
                write_mask: wgpu::ColorWrites::ALL,
            })],
        };

        let graphic_pipeline =
            rhi.borrow()
                .device
                .create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                    label: Some("main camera pass pipeline"),
                    layout: Some(&layout),
                    vertex,
                    fragment: Some(fragment),
                    primitive,
                    depth_stencil: None,
                    multisample,
                    multiview,
                });

        Self {
            graphic_pipeline,
            vertex_buffers: Vec::with_capacity(64),
            index_buffers: Vec::with_capacity(64),
            canvas_batch_group_layout,
            canvas_scene,
            canvas_batch_group: HashMap::new(),
        }
    }
}

impl CanvasPass {
    /// Uploads a new camera transform into the scene's global uniform buffer.
    pub fn update_scene(&mut self, rhi: Rhi, transform: Matrix4f) {
        rhi.borrow().queue.write_buffer(
            &self.canvas_scene.global_buffer,
            0,
            bytemuck::cast_slice(&[transform]),
        );
    }

    /// Records the canvas render pass: clears `view` to opaque black, then
    /// draws one indexed batch per texture handle. Batch `i` uses
    /// `vertex_buffers[i]` / `index_buffers[i]`; handles without a cached
    /// bind group are skipped silently.
    pub fn draw(
        &mut self,
        encoder: &mut CommandEncoder,
        view: &TextureView,
        textures: &[Handle<Texture>],
    ) {
        let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("Canvas Render Pass"),
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Clear(wgpu::Color {
                        r: 0.0,
                        g: 0.0,
                        b: 0.0,
                        a: 1.0,
                    }),
                    store: true,
                },
            })],
            depth_stencil_attachment: None,
        });
        render_pass.set_pipeline(&self.graphic_pipeline);
        render_pass.set_bind_group(0, &self.canvas_scene.global_group, &[]);
        for (index, handle) in textures.iter().enumerate() {
            if let Some(group) = self.canvas_batch_group.get(handle) {
                render_pass.set_bind_group(1, group, &[]);

                render_pass.set_vertex_buffer(0, self.vertex_buffers[index].buffer.slice(..));
                render_pass.set_index_buffer(
                    self.index_buffers[index].buffer.slice(..),
                    wgpu::IndexFormat::Uint16,
                );
                // NOTE(review): `size` only ever grows (see
                // `update_index_buffer`), so after a batch shrinks this can
                // draw stale trailing indices — TODO track the live index
                // count separately from the allocated capacity.
                render_pass.draw_indexed(0..self.index_buffers[index].size as u32, 0, 0..1);
            }
        }
    }

    /// Updates the canvas batch at `index`: (re)creates any missing texture
    /// bind groups, then uploads the vertex and index data.
    pub fn update_canvas_data(
        &mut self,
        index: usize,
        rhi: Rhi,
        textures: &[Handle<Texture>],
        vertices: &[CanvasQuadVertex],
        indices: &[u16],
        resource: &AliceResource,
    ) {
        // Textures first, then geometry.
        self.update_canvas_texture_bind_group(rhi.clone(), textures, resource);

        let v: &[u8] = bytemuck::cast_slice(vertices);
        self.update_vertex_buffer(rhi.clone(), index, v);
        self.update_index_buffer(rhi, index, indices);
    }

    /// Uploads vertex bytes for batch `index`. Creates the buffer on first
    /// use, re-creates it when `data` outgrows the current allocation, and
    /// otherwise writes in place via the queue.
    pub fn update_vertex_buffer(&mut self, rhi: Rhi, index: usize, data: &[u8]) {
        if index >= self.vertex_buffers.len() {
            // First upload for this batch slot: allocate and initialize.
            let buffer =
                rhi.borrow()
                    .device
                    .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                        label: Some("canvas_vertex_buffer"),
                        contents: data,
                        usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
                    });

            self.vertex_buffers.push(SizedBuffer {
                buffer,
                size: data.len(),
            });
        } else {
            let buffer = &mut self.vertex_buffers[index];

            if data.len() > buffer.size {
                // Outgrew the allocation: replace the buffer wholesale.
                buffer.size = data.len();
                buffer.buffer =
                    rhi.borrow()
                        .device
                        .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                            label: Some("canvas_vertex_buffer"),
                            contents: data,
                            usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
                        });
            } else {
                // Fits in the existing allocation: cheap in-place write.
                rhi.borrow().queue.write_buffer(&buffer.buffer, 0, data);
            }
        }
    }

    /// Uploads `u16` indices for batch `index`, with the same
    /// create / grow / write-in-place policy as `update_vertex_buffer`.
    /// Here `size` is an index *count*, not a byte length.
    pub fn update_index_buffer(&mut self, rhi: Rhi, index: usize, data: &[u16]) {
        if index >= self.index_buffers.len() {
            // First upload for this batch slot: allocate and initialize.
            let buffer =
                rhi.borrow()
                    .device
                    .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                        label: Some("canvas_index_buffer"),
                        contents: bytemuck::cast_slice(data),
                        usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
                    });

            self.index_buffers.push(SizedBuffer {
                buffer,
                size: data.len(),
            });
        } else {
            let buffer = &mut self.index_buffers[index];

            if data.len() > buffer.size {
                // Outgrew the allocation: replace the buffer wholesale.
                buffer.size = data.len();
                buffer.buffer =
                    rhi.borrow()
                        .device
                        .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                            label: Some("canvas_index_buffer"),
                            contents: bytemuck::cast_slice(data),
                            usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
                        });
            } else {
                // Fits: in-place write. Note `size` is NOT shrunk here, so
                // `draw` keeps using the old (larger) count — see the NOTE in
                // `draw`.
                rhi.borrow()
                    .queue
                    .write_buffer(&buffer.buffer, 0, bytemuck::cast_slice(data));
            }
        }
    }

    /// Creates and caches a texture+sampler bind group (group 1) for every
    /// handle that does not already have one.
    pub fn update_canvas_texture_bind_group(
        &mut self,
        rhi: Rhi,
        handles: &[Handle<Texture>],
        resource: &AliceResource,
    ) {
        let resource = resource.borrow();
        let sampler = resource.get_default_sampler();
        for handle in handles {
            if self.canvas_batch_group.contains_key(handle) {
                continue;
            }
            let texture = resource.get_texture(*handle);
            let group = rhi
                .borrow()
                .device
                .create_bind_group(&wgpu::BindGroupDescriptor {
                    label: Some(format!("canvas_texture_bind_group = {:?}", handle).as_str()),
                    layout: &self.canvas_batch_group_layout,
                    entries: &[
                        wgpu::BindGroupEntry {
                            binding: 0,
                            resource: wgpu::BindingResource::TextureView(&texture.view),
                        },
                        wgpu::BindGroupEntry {
                            binding: 1,
                            resource: wgpu::BindingResource::Sampler(&sampler.sampler),
                        },
                    ],
                });

            self.canvas_batch_group.insert(*handle, group);
        }
    }
}
