#![allow(unused_imports)]
#![allow(dead_code)]

use dotenv::dotenv;
use std::env::var ;
use std::collections::{HashSet,HashMap} ;
use std::sync::{Arc, Mutex};

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

use futures::future::{ready};
use futures::future ;
use futures::stream::StreamExt;

use rdkafka::client::ClientContext;
use rdkafka::consumer::{CommitMode, Consumer, ConsumerContext, Rebalance};
use rdkafka::error::KafkaResult;
use rdkafka::topic_partition_list::TopicPartitionList;
use rdkafka::consumer::stream_consumer::StreamConsumer;

use rdkafka::message::{Message} ;
use rdkafka::config::ClientConfig;
use rdkafka::producer::{FutureProducer, FutureRecord};
use schema_registry_converter::async_impl::schema_registry::SrSettings;
use schema_registry_converter::async_impl::avro::AvroDecoder ;
use schema_registry_converter::types::{Value as AvroValue};

use sqlx::ConnectOptions ;
use sqlx::postgres::{
    PgConnectOptions, PgConnection, PgDatabaseError, PgErrorPosition, PgSeverity,
};
use sqlx::postgres::{PgPoolOptions, PgRow, Postgres};

use serde_json ;
use std::str::FromStr;
use utils::* ;

mod kafka {
    use super::* ;

    /// Consumer context with no-op rebalance and commit hooks.
    pub struct CustomContext;

    impl ClientContext for CustomContext {}

    impl ConsumerContext for CustomContext {
        // Intentionally empty: the default rebalance/commit behavior is fine here.
        fn pre_rebalance(&self, _rebalance: &Rebalance) {}

        fn post_rebalance(&self, _rebalance: &Rebalance) {}

        fn commit_callback(&self, _result: KafkaResult<()>, _offsets: &TopicPartitionList) {}
    }

    /// Builds schema-registry settings from a comma-separated host list.
    /// The first host seeds the builder; every remaining host is registered
    /// as an additional URL.
    pub fn mk_sr(hosts: &str) -> SrSettings {
        let mut host_iter = hosts.split(',');
        let primary = host_iter.next().expect("at least one ACCESS_KAFKA_SR_HOSTS needed!");
        let mut builder = SrSettings::new_builder(primary.into());
        for fallback in host_iter {
            builder.add_url(fallback.into());
        }
        builder.build().expect("kafka-sr build error!")
    }
}

mod utils {
    use super::* ;

    /// Returns the first entry of the comma-separated `hosts` list that parses
    /// as an IP address bound to one of this machine's network interfaces, or
    /// `None` when nothing matches (the caller uses this to decide whether this
    /// node should run the pipeline).
    ///
    /// Entries that fail to parse as an IP address are silently skipped.
    ///
    /// # Panics
    /// Panics if the local network interfaces cannot be enumerated.
    pub fn resolve_socket_addr(hosts: &str) -> Option<IpAddr> {
        let local_ips: HashSet<IpAddr> = get_if_addrs::get_if_addrs()
            .expect("failed to enumerate local network interfaces")
            .iter()
            .map(|interface| interface.ip())
            .collect();
        hosts
            .split(',')
            .filter_map(|host| host.parse::<IpAddr>().ok())
            .find(|ip| local_ips.contains(ip))
    }

}

mod avro {
    use super::* ;

    /// Flattens an Avro `Record` into a field-name → value map.
    /// Any non-record value yields an empty map.
    pub fn as_map(v: &AvroValue) -> HashMap<String, AvroValue> {
        if let AvroValue::Record(_, items) = v {
            items.iter().cloned().collect()
        } else {
            HashMap::new()
        }
    }

    /// Extracts a plain `Int` value; `None` for any other variant.
    pub fn as_int(v: &AvroValue) -> Option<i32> {
        if let AvroValue::Int(i) = *v {
            Some(i)
        } else {
            None
        }
    }

    /// Extracts a string, looking through one level of `Union`
    /// (e.g. a nullable string field). `None` for anything else.
    pub fn as_string(v: &AvroValue) -> Option<String> {
        let inner = match v {
            AvroValue::Union(boxed) => &**boxed,
            other => other,
        };
        match inner {
            AvroValue::String(s) => Some(s.clone()),
            _ => None,
        }
    }
}

/// Consumes Avro-encoded option/enum records from Kafka and pushes each row
/// into the matching tenant's Postgres database.
///
/// Flow:
/// 1. Load env config; exit early unless one of the configured push hosts is
///    bound to a local interface (so only one node in the cluster runs this).
/// 2. Read the tenant-datasource table from the management database, open a
///    connection per tenant, and create the output table if missing.
/// 3. Stream the Kafka topic, decode each message via the schema registry,
///    route it by tenant id, and INSERT into that tenant's table.
///
/// # Errors
/// Returns an error when the management or tenant Postgres connections fail;
/// most other failures (missing env vars, decode/parse problems) panic.
pub async fn run() -> Result<(), Box<dyn std::error::Error>> {
    dotenv::from_filename("meta-lib-static/env/.env").ok();

    // Only the node whose local interface matches one of the configured push
    // hosts should run; every other node exits immediately.
    let env_hosts = var("EXCHANGE_CHANNEL_DC_PUSH_HOSTS").unwrap_or("127.0.0.1".into()) ;
    if resolve_socket_addr(&env_hosts).is_none() {
        println!("EXCHANGE_CHANNEL_DC_PUSH_HOSTS not match! skip") ;
        return Ok(()) ;
    }

    // Kafka input configuration.
    let env_kafkas = var("EXCHANGE_CHANNEL_DC_PUSH_IN_KAFKAS").unwrap_or("127.0.0.1:9092".into()) ;
    let env_kafka_srs = var("EXCHANGE_CHANNEL_DC_PUSH_IN_KAFKA_SRS").unwrap_or("127.0.0.1:8081".into()) ;
    let in_topic = var("EXCHANGE_CHANNEL_DC_PUSH_IN_KAFKA_NAMES").unwrap_or("data_pipeline_dev_option_enum".into()) ;

    // Postgres output configuration (management database holding tenant datasources).
    let env_pgs = var("EXCHANGE_CHANNEL_DC_PUSH_OUT_PG_NODES").unwrap_or("127.0.0.1".into()) ;
    let env_pg_dbname = var("EXCHANGE_CHANNEL_DC_PUSH_OUT_PG_DBNAME").unwrap_or("postgres".to_string()) ;
    let env_pg_username = var("EXCHANGE_CHANNEL_DC_PUSH_OUT_PG_USERNAME").expect("NO EXCHANGE_CHANNEL_DC_PUSH_OUT_PG_USERNAME FOUND!") ;
    // Fixed: the panic message previously read "…PASSWORD FOUND!" (missing "NO"),
    // inconsistent with the username line above.
    let env_pg_password = var("EXCHANGE_CHANNEL_DC_PUSH_OUT_PG_PASSWORD").expect("NO EXCHANGE_CHANNEL_DC_PUSH_OUT_PG_PASSWORD FOUND!") ;

    let out_name = var("EXCHANGE_CHANNEL_DC_PUSH_OUT_PG_NAMES").unwrap_or("option_values".to_string()) ;

    println!("kafkas: {}, kafka_srs: {}, in_topic: {}", env_kafkas, env_kafka_srs, in_topic) ;
    println!("connect pg: {}", &env_pgs) ; 

    let pg_uri = format!("postgres://{}:{}@{}/{}", &env_pg_username, &env_pg_password, &env_pgs, &env_pg_dbname) ;
    let mut out_pg_conn = PgConnectOptions::from_str(&pg_uri).unwrap().connect().await? ;

    // Every tenant datasource owned by the dc-management service.
    let params = "select tenant_id, jdbc_url, schema, username, password from sys_tenant_datasource where application_name = 'nexus-dc-management-service'" ;

    use futures::TryStreamExt;
    use sqlx::Row ;

    // Compile the JDBC-URL pattern once instead of once per tenant row.
    use regex::Regex ;
    let re = Regex::new(r"jdbc:postgresql://(?P<node>.*)/(?P<dbname>\w+)").unwrap();

    let mut rows = sqlx::query(params).fetch(&mut out_pg_conn) ;
    // tenant_id (as text) -> (open tenant connection, schema name)
    let mut pg_conns = HashMap::new();
    while let Some(row) = rows.try_next().await? {
        let tenant_id: i64 =  row.try_get("tenant_id").unwrap() ;
        let tenant_id_text = tenant_id.to_string() ;
        let jdbc_url: &str = row.try_get("jdbc_url").unwrap() ;

        let schema: &str = row.try_get("schema").unwrap() ;
        let schema_text = schema.to_string() ;
        let username: &str = row.try_get("username").unwrap() ;
        let password: &str = row.try_get("password").unwrap() ;

        // Translate the stored JDBC URL into a native Postgres URI.
        let caps = re.captures(jdbc_url).unwrap() ;

        let node = &caps["node"] ;
        let dbname = &caps["dbname"] ;

        let tenant_pg_uri = format!("postgres://{}:{}@{}/{}", username, password, node, dbname) ;
        
        println!("tenant_pg_uri:{}", tenant_pg_uri) ;
        let mut tenant_pg_conn = PgConnectOptions::from_str(&tenant_pg_uri).unwrap().connect().await? ;

        // Make sure the output table exists in this tenant's schema.
        let ddl = format!(
          "CREATE TABLE IF NOT EXISTS \"{}\".{} (
             id integer not null,
             option_name varchar(50),
             option_value varchar(200),
             option_value_type varchar(50),
             create_time timestamp(0) DEFAULT CURRENT_TIMESTAMP,
             CONSTRAINT \"{}_option_values_pkey\" PRIMARY KEY (id)
          )", &schema_text, &out_name, &schema_text) ;

        println!("ddl: {}", ddl) ;
        sqlx::query(&ddl).execute(&mut tenant_pg_conn).await.expect("ddl error!") ;
        pg_conns.insert(tenant_id_text, (tenant_pg_conn, schema_text)) ;
    }

    // NOTE(review): these std::sync::Mutexes have their guards held across
    // `.await` points below. That only works because the stream is consumed
    // sequentially on a single task (no contention); under real concurrency
    // this would block the executor — switch to tokio::sync::Mutex before
    // parallelizing. TODO confirm single-task assumption with the runtime setup.
    let out_pg_conn_arc = &Arc::new(Mutex::new(pg_conns)) ;

    let decoder = AvroDecoder::new(kafka::mk_sr(&env_kafka_srs));
    let decoder_arc = &Arc::new(Mutex::new(decoder));

    let consumer: StreamConsumer<kafka::CustomContext> = ClientConfig::new()
        .set("group.id", "exchange-channel-dc-push")
        .set("bootstrap.servers", &env_kafkas)
        // "smallest" is the legacy librdkafka spelling of "earliest".
        .set("auto.offset.reset", "smallest")
        .set("enable.auto.commit", "true")
        .create_with_context(kafka::CustomContext)
        .expect("Consumer creation failed") ;

    consumer.subscribe(&[in_topic.as_str()][..]).expect("Can't subscribe to specified topics") ;

    let out_name_ref = &out_name ;
    // NOTE(review): `start()` is a deprecated alias for `stream()` in newer
    // rdkafka releases — confirm against the crate version in Cargo.toml.
    consumer.start()
        // Drop messages that errored at the Kafka layer.
        .filter_map( |x| future::ready( x.ok() ) )
        // Decode the Avro payload via the schema registry; drop undecodable ones.
        .filter_map( |x| async move {
            let decode_one = decoder_arc.clone() ;
            let mut decode_mut = decode_one.lock().unwrap() ;
            decode_mut.decode(x.payload()).await.ok()
        }).for_each(|x| async move {
            let row = avro::as_map(&x.value) ;
            // cluster_val is a JSON array whose first element is the tenant id.
            let cluster_val = avro::as_string(row.get("cluster_val").unwrap()).unwrap() ;
            let cluster_val_json: serde_json::Value = serde_json::from_str(&cluster_val).unwrap() ;
            let tenant_id = cluster_val_json[0].as_str().unwrap() ;
            println!("tenant_id: {}", tenant_id) ;

            let pg_conns = out_pg_conn_arc.clone() ;
            let mut pg_conns_mut = pg_conns.lock().unwrap() ;
            // Records for tenants without a registered datasource are skipped.
            if let Some((tenant_pg_conn, schema_str)) = pg_conns_mut.get_mut(tenant_id) {
                let sql = format!("INSERT INTO \"{}\".{} (id, option_name, option_value) VALUES ($1, $2, $3)", schema_str, out_name_ref) ;

                // Keep only the last dot-separated segment of the option name,
                // except for the one combined screen key, which maps to a fixed name.
                let option_name = avro::as_string(row.get("option_name").unwrap()) ;
                let option_name_new = match option_name {
                    Some(x) if x == "screen.screen_height:screen.screen_width" => Some("screen_height_screen_width".to_string()),
                    Some(x) => Some(x.split('.').collect::<Vec<_>>().last().unwrap().to_string()),
                    None => None
                } ;

                // option_val arrives as a JSON array of strings; store it comma-joined.
                let option_val: Option<String> = avro::as_string(row.get("option_val").unwrap()) ;
                let option_val_new: Option<String> = match option_val {
                    Some(x) => {
                        let json : serde_json::Value = serde_json::from_str(&x).unwrap() ;
                        let join_str = json.as_array().unwrap().iter().map(|x|
                            x.as_str().unwrap()
                        ).collect::<Vec<_>>().join(",") ;
                        Some(join_str)
                    },
                    None => None
                } ; 
                sqlx::query(&sql)
                    .bind(avro::as_int(row.get("enum_key").unwrap()))
                    .bind(option_name_new)
                    .bind(option_val_new)
                    .execute( &mut *tenant_pg_conn )
                    .await.unwrap() ;
            }
        }).await ;
    Ok(())

}
