#![allow(unused_imports)]
#![allow(unused_variables)]

use std::time::Duration;
use std::collections::{HashSet, HashMap} ;
use std::net::SocketAddr ;
use std::str;

use futures::future::{ready};
use futures::stream::{Stream, StreamExt};

use rdkafka::message::{Message} ;
use rdkafka::config::ClientConfig;
use rdkafka::producer::{FutureProducer, FutureRecord};

use schema_registry_converter::schema_registry_common::{SuppliedSchema, SchemaType} ;
use schema_registry_converter::async_impl::schema_registry::{SrSettings, post_schema} ;

use rdkafka::client::ClientContext;
use rdkafka::consumer::{CommitMode, Consumer, ConsumerContext, Rebalance};
use rdkafka::error::KafkaResult;
use rdkafka::topic_partition_list::TopicPartitionList;
use rdkafka::consumer::stream_consumer::StreamConsumer;

use avro_rs::{Schema, from_avro_datum, to_avro_datum} ;

use base64::{encode, decode};

use dotenv::dotenv;
use std::env::var ;

use std::io::Write ;
use byteorder::{BigEndian, ByteOrder} ;
use rust_embed::RustEmbed;

use self::utils::* ;

/// Compile-time embedded assets from the crate's `avsc/` directory.
/// Lets `main` load the Avro schema (`dc_sdk_dev.avsc`) with no runtime
/// filesystem dependency.
#[derive(RustEmbed)]
#[folder = "avsc"]
struct AvscAsset;


mod utils {
    use super::* ;
    pub fn resolve_socket_addr(hosts: &str) -> Option<SocketAddr> {
        let ips = get_if_addrs::get_if_addrs().unwrap().iter()
            .map(|interface| interface.ip()).collect::<HashSet<_>>();
        hosts.split(",")
            .map(|x| x.replace("http://", ""))
            .filter_map(|x| x.parse::<SocketAddr>().ok())
            .filter(|x| ips.contains(&x.ip()))
            .next()
    }
    pub fn get_payload(id: u32, encoded_bytes: Vec<u8>) -> Vec<u8> {
        let mut payload = vec![0u8];
        let mut buf = [0u8; 4];
        BigEndian::write_u32(&mut buf, id);
        payload.extend_from_slice(&buf);
        payload.extend_from_slice(encoded_bytes.as_slice());
        payload
    }


}

mod kafka {
    use super::* ;

    /// Consumer context hooking into rdkafka's rebalance / commit lifecycle.
    /// All callbacks are currently no-ops; they exist as anchor points for
    /// future logging or metrics.
    pub struct CustomContext;
    impl ClientContext for CustomContext {}
    impl ConsumerContext for CustomContext {
        // Invoked before a partition rebalance is applied; intentionally empty.
        fn pre_rebalance(&self, _rebalance: &Rebalance) {
        }
        
        // Invoked after a partition rebalance completes; intentionally empty.
        fn post_rebalance(&self, _rebalance: &Rebalance) {
        }
        
        // Invoked after offsets are committed; errors in `_result` are
        // deliberately ignored here (best-effort auto-commit).
        fn commit_callback(&self, _result: KafkaResult<()>, _offsets: &TopicPartitionList) {
        }
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    dotenv().ok();

    let env_hosts = var("EXCHANGE_BUFFER_KAFKA_HOSTS").unwrap_or("127.0.0.1:0".into()) ;
    if resolve_socket_addr(&env_hosts).is_none() {
        println!("EXCHANGE_BUFFER_KAFKA_HOSTS not match! skip") ;
        return Ok(()) ;
    }
    let in_kafka_prefix = var("EXCHANGE_BUFFER_KAFKA_IN_KAFKA_PREFIX").unwrap_or("data_buffer_dev".into()) ;

    let buffer_kafka_hosts = var("EXCHANGE_BUFFER_KAFKA_IN_KAFKAS").expect("BUFFER_KAFKA_HOSTS is missing") ;
    let access_kafka_hosts = var("EXCHANGE_BUFFER_KAFKA_OUT_KAFKAS").unwrap_or("127.0.0.1:9092".into()) ;
    let access_kafka_sr_hosts = var("EXCHANGE_BUFFER_KAFKA_OUT_KAFKA_SRS").unwrap_or("http://127.0.0.1:8081".into()) ;

    let schema_bs = AvscAsset::get("dc_sdk_dev.avsc").expect("dc_sdk_dev.avsc not found!") ;
    let schema_s = str::from_utf8(&schema_bs).expect("from_utf8 error!") ;
    

    let mut sr_hosts = access_kafka_sr_hosts.split(",") ;
    let first_sr_host = sr_hosts.next().expect("at least one ACCESS_KAFKA_SR_HOSTS needed!") ;
    let mut sr_settings_builder = SrSettings::new_builder(first_sr_host.into()) ;
    sr_hosts.for_each(|x| {sr_settings_builder.add_url(x.into());}) ;
    let sr_settings = sr_settings_builder.build().expect("kafka-sr build error!") ;

    let out_topic = format!("{}_{}", in_kafka_prefix, "dc_sdk_dev") ;
    let buffer_avro_topic = format!("{}_{}_avro", in_kafka_prefix, "dc_sdk_dev") ;
    let registered_schema = post_schema(&sr_settings, out_topic.clone(), SuppliedSchema {
        name: None,
        schema_type: SchemaType::Avro,
        schema: schema_s.into(),
        references: vec![],
    }).await.expect("post schema error!") ;

    let schema_id = registered_schema.id ;
    println!("registered_schema: {:?}", schema_id) ;

    let consumer: StreamConsumer<kafka::CustomContext> = ClientConfig::new()
        .set("group.id", "exchange-buffer-kafka-rust")
        .set("bootstrap.servers", &buffer_kafka_hosts) 
        .set("auto.offset.reset", "smallest")
        .set("enable.auto.commit", "true")
        .create_with_context(kafka::CustomContext) 
        .expect("Consumer creation failed");

    let producer: &FutureProducer = &ClientConfig::new()
        .set("bootstrap.servers", &access_kafka_hosts) 
        .set("compression.type", "zstd")
        .create()
        .expect("Producer creation error");


    let topics = [buffer_avro_topic.as_str()] ;
    consumer.subscribe(&topics[..]).expect("Can't subscribe to specified topics") ;

    println!("buffer_kafka_hosts: {}", &buffer_kafka_hosts) ;
    println!("topic: {:?}", topics) ;

    use futures::prelude::*;

    let schema_bs = AvscAsset::get("dc_sdk_dev.avsc").expect("dc_sdk_dev.avsc not found!") ;
    let schema_s = str::from_utf8(&schema_bs).expect("from_utf8 error!") ;
    let schema = Schema::parse_str(&schema_s).expect("avsc schema error!") ;

    let out_topic_str = &out_topic ;
    consumer.start() 
        .filter_map( |x| future::ready(x.ok()))
        .filter_map( |x| future::ready(
            zstd::decode_all(x.payload().unwrap()).ok().map(move |y| (x.topic().to_owned(), y))
        ))
        .filter_map( |(topic, x)| future::ready({ 
            from_avro_datum(&schema, &mut x.as_slice(), None).ok().map(|x| (topic, x))
        }))
        .filter_map( |(topic, x)| future::ready({
            to_avro_datum(&schema, x).ok().map(|x| (topic, x))
        }))
        .for_each(|(topic, x)| async move {
            let schema_bs = get_payload(schema_id, x) ;
            let record : FutureRecord<Vec<u8>, Vec<u8>> = FutureRecord::to(out_topic_str).payload(&schema_bs) ;

            producer.send(record, Duration::from_secs(0)).await.unwrap() ;
        }).await ;


    println!("finished!") ;

    Ok(()) 
}

