use std::sync::Arc;

use image::DynamicImage;
use lyon::geom::euclid::default::{Size2D, Transform3D};
use piet_cosmic_text::cosmic_text::SwashCache;
use piet_cosmic_text::Text;
use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
use wgpu::util::{BufferInitDescriptor, DeviceExt};
use wgpu::{
    BindGroup, BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor,
    BindGroupLayoutEntry, BindingType, Buffer, BufferUsages, CommandEncoderDescriptor,
    RenderPipeline, ShaderStages, Surface, TextureViewDescriptor,
};

use crate::{primitive_pipeline, texture_pipeline};

use super::frame::Frame;
use super::texture::Texture;

/// Owns the GPU device, window surface, render pipelines, and shared
/// resources used by [`Frame`] to record and submit draw commands.
pub struct Graphics {
    /// Logical GPU device; creates buffers, textures, pipelines, bind groups.
    pub device: wgpu::Device,
    /// Queue used to submit command buffers and perform buffer writes.
    pub queue: wgpu::Queue,
    /// Window surface that frames are presented to.
    pub surface: Surface<'static>,
    /// Current surface configuration (size, format, present mode); kept in
    /// sync with the window by `resize`.
    pub config: wgpu::SurfaceConfiguration,
    /// Pipeline built by `primitive_pipeline::create_primitive_pipeline`.
    pub primitive_pipeline: RenderPipeline,
    /// Pipeline built by `texture_pipeline::create_texture_pipeline`.
    pub texture_pipeline: RenderPipeline,
    // multisampled framebuffer
    pub msaa_framebuffer: wgpu::TextureView,
    /// Uniform buffer holding `view_matrix`; rewritten on every resize.
    pub view_buffer: Buffer,
    /// Bind group exposing `view_buffer` to the vertex stage (group 0).
    pub view_binding: BindGroup,
    /// Layout for texture-view + sampler bind groups (see `texture_binding`).
    pub texture_layout: BindGroupLayout,
    /// Layout for per-draw transform uniform bind groups
    /// (see `transform_binding`).
    pub transform_layout: BindGroupLayout,
    /// Shared vertex buffer for textured quads
    /// (from `create_texture_buffers`).
    pub texture_vbo: Buffer,
    /// Shared index buffer for textured quads.
    pub texture_ibo: Buffer,
    /// Index count returned by `create_texture_buffers` for `texture_ibo`.
    pub texture_indices: u32,
    // pub staging_belt: StagingBelt,
    /// Pixel-space -> NDC transform; rebuilt whenever the window resizes.
    pub view_matrix: Transform3D<f32>,
    /// Text layout engine (piet-cosmic-text).
    pub font_system: Text,
    /// Glyph raster cache used alongside `font_system`.
    pub swash_cache: SwashCache,
}

impl Graphics {
    /// Initializes the GPU: instance, surface, adapter, device/queue,
    /// pipelines, shared buffers, and the MSAA framebuffer.
    ///
    /// The surface is initially configured at a 1x1 placeholder extent;
    /// call [`Graphics::resize`] with the real window size before drawing.
    ///
    /// # Panics
    /// Panics if no compatible adapter/device can be acquired or the surface
    /// cannot be created.
    pub async fn new<W: HasWindowHandle + HasDisplayHandle + Sync + Send + 'static>(
        window: Arc<W>,
    ) -> Self {
        // The instance is a handle to our GPU.
        // Backends::all() => Vulkan + Metal + DX12 + Browser WebGPU.
        let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
            backends: wgpu::Backends::all(),
            flags: Default::default(),
            dx12_shader_compiler: Default::default(),
            gles_minor_version: Default::default(),
        });

        let surface = instance.create_surface(window).unwrap();

        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions {
                // LowPower prefers the integrated GPU, which is plenty for
                // this 2D workload and saves battery on laptops.
                power_preference: wgpu::PowerPreference::LowPower,
                compatible_surface: Some(&surface),
                force_fallback_adapter: false,
            })
            .await
            .unwrap();

        let (device, queue) = adapter
            .request_device(
                &wgpu::DeviceDescriptor {
                    label: None,
                    required_features: Default::default(),
                    required_limits: Default::default(),
                },
                None, // Trace path
            )
            .await
            .unwrap();

        let surface_caps = surface.get_capabilities(&adapter);
        // Shader code assumes an sRGB surface texture. Using a different one
        // will result in all the colors coming out darker. If you want to
        // support non-sRGB surfaces, you'll need to account for that when
        // drawing to the frame.
        let surface_format = surface_caps
            .formats
            .iter()
            .copied()
            .find(|f| f.is_srgb())
            .unwrap_or(surface_caps.formats[0]);
        let config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            // Placeholder 1x1 extent; `resize` sets the real window size.
            width: 1,
            height: 1,
            present_mode: surface_caps.present_modes[0],
            // Must be at least 1; 2 is wgpu's default (one frame being
            // presented while the next is recorded). The previous value of 0
            // is below the meaningful minimum.
            desired_maximum_frame_latency: 2,
            alpha_mode: surface_caps.alpha_modes[0],
            view_formats: vec![],
        };
        surface.configure(&device, &config);
        // Initial view transform (flip y, move origin to top-left corner);
        // `resize` replaces it with one scaled by the actual pixel size.
        let view_matrix: Transform3D<f32> =
            Transform3D::translation(-1.0, 1.0, 0.0).pre_scale(1.0, -1.0, 1.0);
        let view_buffer = device.create_buffer_init(&BufferInitDescriptor {
            label: Some("view buffer"),
            contents: bytemuck::cast_slice(&view_matrix.to_arrays()),
            usage: BufferUsages::UNIFORM | BufferUsages::COPY_DST,
        });
        // Group layout for the global view uniform, shared by all pipelines.
        let view_layout = device.create_bind_group_layout(&BindGroupLayoutDescriptor {
            label: Some("view bind group layout"),
            entries: &[BindGroupLayoutEntry {
                binding: 0,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Uniform,
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            }],
        });
        let view_binding = device.create_bind_group(&BindGroupDescriptor {
            label: Some("view bind group"),
            layout: &view_layout,
            entries: &[BindGroupEntry {
                binding: 0,
                resource: view_buffer.as_entire_binding(),
            }],
        });
        // Layout for per-draw transform uniforms (see `transform_binding`).
        let transform_layout = device.create_bind_group_layout(&BindGroupLayoutDescriptor {
            label: None,
            entries: &[BindGroupLayoutEntry {
                binding: 0,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Uniform,
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            }],
        });
        // Layout for texture-view + sampler pairs (see `texture_binding`).
        let texture_layout = device.create_bind_group_layout(&BindGroupLayoutDescriptor {
            entries: &[
                BindGroupLayoutEntry {
                    binding: 0,
                    visibility: ShaderStages::FRAGMENT,
                    ty: BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                    },
                    count: None,
                },
                BindGroupLayoutEntry {
                    binding: 1,
                    visibility: ShaderStages::FRAGMENT,
                    // This should match the filterable field of the
                    // corresponding Texture entry above.
                    ty: BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
            label: Some("texture_bind_group_layout"),
        });
        let primitive_pipeline = primitive_pipeline::create_primitive_pipeline(
            &device,
            &[&view_layout, &transform_layout],
            surface_format,
        );
        let texture_pipeline = texture_pipeline::create_texture_pipeline(
            &device,
            &[&view_layout, &transform_layout, &texture_layout],
            surface_format,
        );
        let (texture_vbo, texture_ibo, texture_indices) =
            texture_pipeline::create_texture_buffers(&device);

        // 4x MSAA render target; recreated on every resize to match the
        // surface extent.
        let msaa_framebuffer = create_multisampled_framebuffer(&device, &config, 4);

        let font_system = Text::new();
        let swash_cache = SwashCache::new();

        Self {
            device,
            queue,
            surface,
            config,
            primitive_pipeline,
            texture_pipeline,
            msaa_framebuffer,
            texture_layout,
            texture_vbo,
            texture_ibo,
            texture_indices,
            view_binding,
            view_buffer,
            transform_layout,
            view_matrix,
            font_system,
            swash_cache,
        }
    }

    /// Updates the surface configuration, view matrix, and MSAA framebuffer
    /// for a new window size.
    ///
    /// Sizes with a zero dimension (e.g. a minimized window) are ignored,
    /// since configuring a zero-sized surface is invalid.
    pub fn resize(&mut self, new_size: &Size2D<u32>) {
        if new_size.width == 0 || new_size.height == 0 {
            return;
        }
        self.config.width = new_size.width;
        self.config.height = new_size.height;

        // Map pixel coordinates (origin top-left, y-down) onto wgpu clip
        // space (origin center, y-up): translate then flip/scale.
        let (w, h) = (new_size.width as f32, new_size.height as f32);
        let matrix: Transform3D<f32> =
            Transform3D::translation(-1.0, 1.0, 0.0).pre_scale(2.0 / w, -2.0 / h, 1.0);
        self.queue
            .write_buffer(&self.view_buffer, 0, bytemuck::cast_slice(&matrix.to_arrays()));
        self.msaa_framebuffer = create_multisampled_framebuffer(&self.device, &self.config, 4);
        self.surface.configure(&self.device, &self.config);
        self.view_matrix = matrix;
    }

    /// Uploads `image` to the GPU and returns the resulting [`Texture`].
    ///
    /// `label` is an optional debug label for the underlying wgpu texture
    /// (visible in graphics debuggers). Thin wrapper over
    /// [`Texture::from_image`] using this context's device and queue.
    pub fn load_texture(&self, image: &DynamicImage, label: Option<&str>) -> Texture {
        Texture::from_image(&self.device, &self.queue, image, label)
    }

    /// Creates a bind group pairing `texture`'s view and sampler, laid out
    /// per `self.texture_layout` (binding 0 = view, binding 1 = sampler).
    pub fn texture_binding(&self, texture: &Texture) -> BindGroup {
        let view_entry = BindGroupEntry {
            binding: 0,
            resource: wgpu::BindingResource::TextureView(&texture.view),
        };
        let sampler_entry = BindGroupEntry {
            binding: 1,
            resource: wgpu::BindingResource::Sampler(&texture.sampler),
        };
        self.device.create_bind_group(&BindGroupDescriptor {
            label: Some("texture bind group"),
            layout: &self.texture_layout,
            entries: &[view_entry, sampler_entry],
        })
    }

    /// Uploads `transform` into a fresh uniform buffer and wraps it in a
    /// bind group matching `self.transform_layout` (binding 0).
    ///
    /// NOTE: a new GPU buffer is allocated on every call.
    pub fn transform_binding(&self, transform: Transform3D<f32>) -> BindGroup {
        let matrix = transform.to_arrays();
        let uniform = self.device.create_buffer_init(&BufferInitDescriptor {
            label: None,
            contents: bytemuck::cast_slice(&matrix),
            usage: BufferUsages::UNIFORM,
        });
        let entry = BindGroupEntry {
            binding: 0,
            resource: uniform.as_entire_binding(),
        };
        self.device.create_bind_group(&BindGroupDescriptor {
            label: None,
            layout: &self.transform_layout,
            entries: &[entry],
        })
    }

    fn frame(&mut self) -> Frame {
        let output = self
            .surface
            .get_current_texture()
            .expect("Idk what to do in this case.");
        let encoder = self
            .device
            .create_command_encoder(&CommandEncoderDescriptor { label: None });
        Frame {
            g: self,
            encoder,
            view: output
                .texture
                .create_view(&TextureViewDescriptor::default()),
            output,
            translate: Default::default(),
            clip_rect: None,
        }
    }

    pub fn draw<F>(&mut self, func: F)
    where
        F: FnOnce(&mut Frame),
    {
        let mut frame = self.frame();
        func(&mut frame);
        frame.submit();
    }
}

/// Creates a texture that uses MSAA and fits a given swap chain
/// Creates a multisampled render target matching the surface configuration,
/// so it can be resolved directly into the swapchain texture.
fn create_multisampled_framebuffer(
    device: &wgpu::Device,
    desc: &wgpu::SurfaceConfiguration,
    sample_count: u32,
) -> wgpu::TextureView {
    let size = wgpu::Extent3d {
        width: desc.width,
        height: desc.height,
        depth_or_array_layers: 1,
    };
    // Format and extent mirror the surface; only sample_count differs.
    let texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("Multisampled frame descriptor"),
        size,
        mip_level_count: 1,
        sample_count,
        dimension: wgpu::TextureDimension::D2,
        format: desc.format,
        usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
        view_formats: &desc.view_formats,
    });
    texture.create_view(&TextureViewDescriptor::default())
}
