
use wgpu::MemoryHints::Performance; 
use winit::window::Window; 
use std::sync::Arc;
use std::borrow::Cow;



/// Rendering context tying a window surface to a wgpu device and a
/// ready-to-use render pipeline. The lifetime `'a` bounds the surface
/// (it must not outlive its presentation target).
pub struct Context<'a> {
    surface: wgpu::Surface<'a>, // window surface; requires an explicit lifetime
    config: wgpu::SurfaceConfiguration, // current surface size/format config
    adapter: wgpu::Adapter, // physical GPU handle (kept for later queries)
    device: wgpu::Device, // logical device used to create GPU resources
    queue: wgpu::Queue, // command queue for submitting encoded work
    pipeline: wgpu::RenderPipeline, // pipeline drawing the demo triangle
}


impl<'a> Context<'a> {
    /// Asynchronously builds the full rendering context for `window`:
    /// instance → surface → adapter → device/queue → surface config → pipeline.
    ///
    /// # Panics
    /// Panics if no suitable adapter is found or device creation fails —
    /// there is no useful way to continue rendering without them.
    pub async fn new_async(window: Arc<Window>) -> Context<'a> {
        let instance = wgpu::Instance::default();
        // A surface represents a platform-specific presentation target
        // (e.g. a window). Cloning the Arc keeps the window alive for as
        // long as the surface needs it.
        let surface = instance
            .create_surface(Arc::clone(&window))
            .expect("Failed to create surface");
        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::default(),
                // Require an adapter that can actually present to our surface.
                compatible_surface: Some(&surface),
                force_fallback_adapter: false,
            })
            .await
            .expect("Failed to find an appropriate adapter");

        let (device, queue) = adapter
            .request_device(
                &wgpu::DeviceDescriptor {
                    label: None,
                    required_features: wgpu::Features::empty(),
                    // Use the texture-resolution limits of the adapter so we
                    // can support images as large as the swapchain.
                    required_limits: wgpu::Limits::downlevel_webgl2_defaults()
                        .using_resolution(adapter.limits()),
                    memory_hints: Performance,
                },
                None,
            )
            .await
            .expect("Failed to create device");

        // A zero-sized surface is invalid; clamp to at least 1x1
        // (e.g. for a minimized window).
        let size = window.inner_size();
        let width = size.width.max(1);
        let height = size.height.max(1);
        // Obtain a sensible default configuration for this surface/adapter
        // pair and apply it once before the first frame.
        let config = surface
            .get_default_config(&adapter, width, height)
            .expect("Surface is not supported by the adapter");
        surface.configure(&device, &config);

        let pipeline = Self::create_render_pipeline(&device, config.format);

        Context {
            surface,
            config,
            adapter,
            device,
            queue,
            pipeline,
        }
    }

    /// Blocking convenience wrapper around [`Context::new_async`].
    pub fn new(window: Arc<Window>) -> Context<'a> {
        pollster::block_on(Self::new_async(window))
    }

    /// Resizes the surface to `width` x `height` (clamped to at least 1,
    /// since a zero-sized surface configuration is invalid).
    pub fn resize(&mut self, width: u32, height: u32) {
        self.config.width = width.max(1);
        self.config.height = height.max(1);
        self.surface.configure(&self.device, &self.config);
    }

    /// Renders one frame: clears the surface to green and draws a single
    /// triangle (3 vertices, 1 instance) with the stored pipeline.
    pub fn draw(&mut self) {
        // Acquire the next frame. Lost/Outdated surfaces are a routine
        // consequence of resizes or display changes: reconfigure and skip
        // this frame instead of panicking. A timeout just means we try
        // again next frame.
        let surface_texture = match self.surface.get_current_texture() {
            Ok(frame) => frame,
            Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
                self.surface.configure(&self.device, &self.config);
                return;
            }
            Err(wgpu::SurfaceError::Timeout) => return,
            // Remaining errors (e.g. OutOfMemory) are unrecoverable here.
            Err(e) => panic!("Failed to acquire next swap chain texture: {e:?}"),
        };

        let texture_view = surface_texture
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let mut encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        // Scope the render pass so `rpass` drops (ending the pass) before
        // the encoder is finished.
        {
            let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: None,
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: &texture_view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::GREEN),
                        store: wgpu::StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: None,
                timestamp_writes: None,
                occlusion_query_set: None,
            });
            rpass.set_pipeline(&self.pipeline);
            rpass.draw(0..3, 0..1);
        }
        self.queue.submit(Some(encoder.finish()));
        surface_texture.present();
    }

    /// Builds the render pipeline: compiles `shader.wgsl` (expected to
    /// export `vs_main`/`fs_main`), uses an empty pipeline layout (no bind
    /// groups), no vertex buffers, and a single color target matching
    /// `swapchain_format`.
    fn create_render_pipeline(
        device: &wgpu::Device,
        swapchain_format: wgpu::TextureFormat,
    ) -> wgpu::RenderPipeline {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: None,
            source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))),
        });

        let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: None,
            bind_group_layouts: &[],
            push_constant_ranges: &[],
        });

        device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: None,
            layout: Some(&layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: Some("vs_main"),
                // Vertices are generated in the shader; no vertex buffers.
                buffers: &[],
                compilation_options: Default::default(),
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: Some("fs_main"),
                compilation_options: Default::default(),
                targets: &[Some(swapchain_format.into())],
            }),
            // Defaults: TriangleList topology, CCW front face, no culling.
            primitive: wgpu::PrimitiveState::default(),
            depth_stencil: None,
            multisample: wgpu::MultisampleState::default(),
            multiview: None,
            cache: None,
        })
    }
}

