use std::{io, sync, thread};
use std::collections::HashMap;
use std::future::pending;
use std::ops::Range;
use std::pin::Pin;
use std::process::{ExitStatus, Stdio};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

use lazy_static::lazy_static;
use log::log;
use rand::{Rng, thread_rng};
use regex::Regex;
use tokio::io::{AsyncBufReadExt, BufReader, Lines};
use tokio::process::{Child, ChildStdout, Command};
use tokio::select;
use tokio::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
use tokio::sync::mpsc::{UnboundedReceiver as UbMpscReceiver, UnboundedSender as UbMpscSender};
use tokio::sync::watch::{Receiver, Sender};
use tokio_stream::{Stream, StreamExt};

/// Android logcat priority levels, ordered from least to most severe.
///
/// Discriminants encode that ordering so `PartialOrd`/`Ord` can be used for
/// threshold filtering (e.g. `level >= AndroidLogLevel::WARN`).
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
pub enum AndroidLogLevel {
    VERBOSE = 0,
    DEBUG = 1,
    INFO = 2,
    WARN = 3,
    ERROR = 4,
    FATAL = 5,
}

impl From<&str> for AndroidLogLevel {
    fn from(value: &str) -> Self {
        match value {
            "V" => AndroidLogLevel::VERBOSE,
            "D" => AndroidLogLevel::DEBUG,
            "I" => AndroidLogLevel::INFO,
            "W" => AndroidLogLevel::WARN,
            "E" => AndroidLogLevel::ERROR,
            "F" => AndroidLogLevel::FATAL,
            _ => panic!("Invalid AndroidLogLevel? {}", value),
        }
    }
}

impl From<AndroidLogLevel> for &str {
    fn from(value: AndroidLogLevel) -> Self {
        match value {
            AndroidLogLevel::VERBOSE => { "V" }
            AndroidLogLevel::DEBUG => { "D" }
            AndroidLogLevel::INFO => { "I" }
            AndroidLogLevel::WARN => { "W" }
            AndroidLogLevel::ERROR => { "E" }
            AndroidLogLevel::FATAL => { "F" }
        }
    }
}

impl AndroidLogLevel {
    pub fn get_description(&self) -> &str {
        match *self {
            AndroidLogLevel::VERBOSE => { "Verbose" }
            AndroidLogLevel::DEBUG => { "Debug" }
            AndroidLogLevel::INFO => { "Info" }
            AndroidLogLevel::WARN => { "Warn" }
            AndroidLogLevel::ERROR => { "Error" }
            AndroidLogLevel::FATAL => { "Fatal" }
        }
    }
}

/// One logcat line plus parse metadata.
///
/// The metadata fields of type `Range<usize>` are byte spans into `raw`,
/// avoiding a per-field `String` allocation; they are only meaningful when
/// `resolved` is true.
pub struct Log {
    /// True when `raw` matched `ANDROID_LOG_REGEX`.
    pub resolved: bool,
    /// The original, unmodified line text.
    pub raw: String,
    /// Per-session monotonically increasing id (assigned by the publisher).
    pub id: usize,
    /// Whether the message part contains an embedded '\n'.
    pub contains_n: bool,
    /// Process name resolved from the pid, when the `ps` lookup succeeded.
    pub process: Option<String>,

    // When this Log was constructed locally; used by buffer statistics.
    st_born_time: Instant,

    // metadata
    time: Range<usize>,        // timestamp span in `raw`
    pid: i32,                  // parsed process id (0 if unresolved)
    pid_range: Range<usize>,   // pid span in `raw`
    tid: i32,                  // parsed thread id (0 if unresolved)
    tid_range: Range<usize>,   // tid span in `raw`
    level: AndroidLogLevel,    // parsed priority (VERBOSE if unresolved)
    tag: Range<usize>,         // tag span in `raw`
    content: Range<usize>,     // message span in `raw`
}

impl Log {
    /// Timestamp slice of the raw line (empty range until resolved).
    pub fn get_time(&self) -> &str {
        &self.raw[self.time.clone()]
    }

    /// Parsed process id (0 when the line is unresolved).
    pub fn get_pid(&self) -> i32 {
        self.pid
    }

    /// Pid exactly as it appeared in the raw line.
    pub fn get_pid_str(&self) -> &str {
        &self.raw[self.pid_range.clone()]
    }

    /// Parsed thread id (0 when the line is unresolved).
    pub fn get_tid(&self) -> i32 {
        self.tid
    }

    /// Tid exactly as it appeared in the raw line.
    pub fn get_tid_str(&self) -> &str {
        &self.raw[self.tid_range.clone()]
    }

    /// Resolved process name, or "" when unknown.
    pub fn get_process(&self) -> &str {
        self.process.as_deref().unwrap_or("")
    }

    /// Priority parsed from the line (VERBOSE when unresolved).
    pub fn get_level(&self) -> AndroidLogLevel {
        self.level.clone()
    }

    /// Tag slice of the raw line.
    pub fn get_tag(&self) -> &str {
        &self.raw[self.tag.clone()]
    }

    /// Message body for resolved lines; the whole raw line otherwise.
    pub fn get_content(&self) -> &str {
        if self.resolved {
            &self.raw[self.content.clone()]
        } else {
            &self.raw
        }
    }
}

/// Shared, growable buffer of parsed logs for one capture session.
pub(crate) type AllLog = Arc<Mutex<Vec<Arc<Log>>>>;
/// Non-owning handle to a session buffer: the collector can reach old
/// sessions without keeping their logs alive after the UI drops them.
type WeakAllLog = sync::Weak<Mutex<Vec<Arc<Log>>>>;
/// Control channel from the UI into the collector thread.
pub(crate) type RawCtlSender = UbMpscSender<RawEventFromUI>;
pub(crate) type RawCtlReceiver = UbMpscReceiver<RawEventFromUI>;

lazy_static!(
    // Parses one logcat line (threadtime-like layout — TODO confirm the
    // exact `-v` format used) into capture groups:
    //   (1) timestamp "MM-DD HH:MM:SS.mmm"  (2) pid  (3) tid
    //   (4) one-letter level  (5) tag  (6) message
    static ref ANDROID_LOG_REGEX: Regex = {
        Regex::new(r"^(\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3})\s+(\d+)\s+(\d+)\s+([A-Z])\s+([\w\[\]\-/\\()-=>@$#.: ]+?):\s+(.+)$").unwrap()
    };

    // Parses a `ps -o pid,name` output row into (1) pid and (2) process name.
    static ref ANDROID_PS_REGEX: Regex = {
        Regex::new(r"^\s*(\d+)\s+(\S+)").unwrap()
    };
);

/// Cache entry for one pid in the pid -> process-name map.
#[derive(Clone)]
struct ProcessNameCache {
    // None records a failed lookup (used to rate-limit retries).
    name: Option<String>,
    // Last time this pid was seen in a log line / queried.
    last_active: Instant,
}

/// Parses one raw logcat line into a `Log` and, when a process cache is
/// supplied, attempts to attach the owning process name.
///
/// Lines that do not match `ANDROID_LOG_REGEX` are kept unresolved: `raw`
/// still holds the full text (served verbatim by `get_content`) while all
/// metadata fields stay at their defaults.
async fn new_log(id: usize, raw: String, process_cache: &mut Option<HashMap<i32, ProcessNameCache>>) -> Arc<Log> {
    // Start from an unresolved shell; fields are filled in only on a match.
    let mut log = Log {
        resolved: false,
        raw,
        id,
        contains_n: false,
        process: None,
        st_born_time: Instant::now(),
        time: Default::default(),
        pid: 0,
        pid_range: Default::default(),
        tid: 0,
        tid_range: Default::default(),
        level: AndroidLogLevel::VERBOSE,
        tag: Default::default(),
        content: Default::default(),
    };
    if let Some(captures) = ANDROID_LOG_REGEX.captures(log.raw.as_str()) {
        log.resolved = true;
        // Groups 1..=6: time, pid, tid, level letter, tag, message.
        log.time = captures.get(1).unwrap().range();
        log.pid = captures[2].parse::<i32>().unwrap();
        log.pid_range = captures.get(2).unwrap().range();
        log.tid = captures[3].parse::<i32>().unwrap();
        log.tid_range = captures.get(3).unwrap().range();
        log.level = captures.get(4).unwrap().as_str().into();
        log.tag = captures.get(5).unwrap().range();
        log.content = captures.get(6).unwrap().range();
        // Multi-line messages need special treatment downstream.
        log.contains_n = log.get_content().contains('\n');
        // A cache is only present for real devices (see LogPublisher::new).
        if let Some(cache) = process_cache.as_mut() {
            resolve_process(&mut log, cache).await;
        }
    } else {
        println!("unresolved log ==> {}", log.raw);
    }
    Arc::new(log)
}

// fn resolve_process_by_pid(pid: i32) -> Option<String> {
//     let mut cmd = Command::new("adb");
//     cmd.arg("shell").arg("ps").arg("-A").arg("-o").arg("pid,name");
//     cmd.stdout(Stdio::piped());
//     if unsafe { !PROCESS_CACHE.is_empty() } {
//         cmd.arg("-p").arg(pid.to_string());
//     }
//     None
// }

/// Best-effort lookup of the process name for `log.pid` via `adb shell ps`,
/// memoized in `process_cache`.
///
/// Cache policy (as implemented below):
/// * A cached name is trusted while the pid keeps appearing: FATAL logs, or
///   any hit within 60s of the last one, refresh the entry and return it.
/// * A cached failure (`name == None`) suppresses re-querying for 10s.
/// * A FATAL (or pid 0) log for an unknown pid is never looked up — the
///   process has most likely already died, so `ps` would not find it.
async fn resolve_process(log: &mut Log, process_cache: &mut HashMap<i32, ProcessNameCache>) {
    let current = Instant::now();
    if let Some(cache) = process_cache.get_mut(&log.pid) {
        if let Some(process) = cache.name.as_ref() {
            if log.level == AndroidLogLevel::FATAL || current.duration_since(cache.last_active) < Duration::from_secs(60) {
                // Repeated hits on the same pid mean the process is still alive.
                cache.last_active = current;
                log.process = Some(process.clone());
                return;
            } else {
                // No hit on this pid for too long; it may have died and the
                // pid been reused, so fall through and re-query.
            }
        } else {
            if current.duration_since(cache.last_active) < Duration::from_secs(10) {
                // A recent lookup for this pid failed; don't retry too soon.
                return;
            }
        }
    } else {
        if log.level == AndroidLogLevel::FATAL || log.pid == 0 {
            // Don't look up a sudden FATAL (or pid 0) log: this is most
            // likely a crash log whose process can no longer be found.
            return;
        } else {
            // No record for this pid yet; fall through and query it.
        }
    }
    let mut cmd = Command::new("adb");
    cmd.arg("shell").arg("ps").arg("-o").arg("pid,name");
    let ps_all: bool;
    if process_cache.is_empty() {
        // First lookup ever: list every process to warm the cache in one shot.
        ps_all = true;
    } else {
        ps_all = false;
        cmd.arg("-p").arg(log.get_pid_str());
    }
    cmd.stdout(Stdio::piped());
    let child = cmd.spawn();
    if let Ok(mut child) = child {
        let mut line_reader = BufReader::new(child.stdout.take().unwrap()).lines();
        loop {
            match line_reader.next_line().await {
                Ok(line_opt) => {
                    match line_opt {
                        None => {
                            // EOF on stdout: reap the child so it doesn't linger.
                            let wait_status = child.wait().await;
                            if let Ok(_) = wait_status {
                                break;
                            } else if let Err(err) = wait_status {
                                log::error!("ps wait error: {err}");
                                break;
                            }
                        }
                        Some(line) => {
                            let result = (*ANDROID_PS_REGEX).captures(&line);
                            if let Some(result) = result {
                                let pid = result[1].parse::<i32>().unwrap();
                                let p_name = result[2].to_string();
                                if pid == log.pid {
                                    log.process = Some(p_name.clone());
                                }
                                // Cache every row seen, not just the pid asked for.
                                process_cache.insert(pid, ProcessNameCache {
                                    name: Some(p_name),
                                    last_active: current,
                                });
                            }
                        }
                    }
                }
                Err(error) => {
                    log::error!("ps error: {error}");
                    break;
                }
            }
        }
        if log.process.is_none() {
            // Negative entry: remember the failure to rate-limit retries (10s rule above).
            process_cache.insert(log.pid, ProcessNameCache {
                name: None,
                last_active: current,
            });
        }
        if ps_all {
            log::info!("ps for all cost {:?} size ==> {}", current.elapsed(), process_cache.len());
        } else {
            log::debug!("ps for {} success {} cost {:?} log ==> {}",
                log.pid, log.process.is_some(), current.elapsed(), log.raw);
        }
    } else if let Err(err) = child {
        // Should be unreachable: if logs are arriving, adb is working, so
        // spawning `adb shell ps` should not fail.
        log::error!("ps launch failed with err {err}");
    }
}

lazy_static!(
    // Canned logcat lines for the fake device (`-test` mode). They are
    // emitted in random order with random delays by `handle_next_line`.
    static ref TEST_LOG: Vec<&'static str> = {
        let mut result = Vec::new();
        result.push("06-09 08:57:09.297   872  6402 I android.system.suspend-service: mSuspendCounter++ = 1 android.system.suspend-service: mSuspendCounter++ = 2 android.system.suspend-service: mSuspendCounter++ = 3 android.system.suspend-service: mSuspendCounter++ = 4");
        result.push("06-09 08:57:09.297   872  6402 I android.system.suspend-service: First holding wakelock name = PowerManagerService.WakeLocks");
        result.push("06-09 08:57:09.301 17679 18251 I 17679PAS.PolicyDispatcher: dispatchMultiplePolicy acquire success result = true");
        result.push("06-09 08:57:09.301 17679 18251 I 17679PAS.PolicyManager: handleDispatchRequest  Thread is pool-10-thread-1");
        result.push("06-09 08:57:09.307 17679 18186 W 17679PAS.LocationModeHelper: isLocationServiceEnabled current location mode is 3");
        result.push("06-09 08:57:09.317 17679 17902 F 17679PAS.UserProfileDao: WeakReference have info");
        result.push("06-09 08:57:09.321 17679 18217 I 17679PAS.PolicyManager: acceptHandleResult, hanleResult is PolicyHandleResult{mPolicyId=20057, mIsNeedDispatch=false}");
        result.push("06-09 08:57:09.366 17679 18251 E 17679PAS.PolicyDispatcher: onDispatchResult  Thread is pool-10-thread-1");
        result.push("06-09 08:57:09.369   872  6402 I android.system.suspend-service: Last release wakelock name = PowerManagerService.WakeLocks");
        result.push("06-09 08:57:09.956  1380  1423 D vendor.oplus.hardware.charger-V3-service: read content:,298,308,4400,4400,-132,500,1100,100,100,1,4,406,4605,100,0,1048704,1,1,0,0,4435,0,0,0,7,0,0,0,-2740,0,4,1,0,0,0,0,0,0");
        result.push("06-09 08:57:09.956  1380  1423 V vendor.oplus.hardware.charger-V3-service: , ret_value:119");
        result
    };
);

/// Hard cap on buffered logs per session: 1,000,000 (digits are grouped in
/// units of 10,000, hence `100_0000`).
pub const MAXIMUM_LOG_SIZE: usize = 100_0000;
/// Number of newest logs kept after an automatic drain: 100,000.
pub const DRAIN_LOG_SIZE: usize = 10_0000;

/// Owns one capture session's log buffer and pushes parsed logs, session
/// announcements and buffer statistics toward the UI.
struct LogPublisher<'a, 'b> {
    // Delivers session-level events (new session, usage stats) to the UI.
    session_tx: &'a MpscSender<SessionEvent>,
    // Nudges the statistics loop (e.g. immediately after a drain).
    statistics_tx: &'b UbMpscSender<()>,
    // Watch channel used to signal "the buffer changed"; set on session start.
    invalidate_tx: Option<Sender<()>>,

    // Id assigned to the next stored log; monotonically increasing.
    next_log_id: usize,
    session_id: i32,
    // A session is announced lazily on its first log line.
    session_started: bool,

    all_log: AllLog,
    // pid -> name cache; None when process resolution is disabled (fake device).
    process_cache: Option<HashMap<i32, ProcessNameCache>>,

    // Id of the log marking the start of the current statistics window.
    statistics_start_log_id: usize,
    session_start_time: Option<Instant>,
}

impl<'a, 'b> LogPublisher<'a, 'b> {
    /// Creates a publisher for `session_id`. `resolve_process_needed` is
    /// true for real adb devices and enables the pid -> process-name cache.
    fn new(session_tx: &'a MpscSender<SessionEvent>, st_tx: &'b UbMpscSender<()>, session_id: i32, resolve_process_needed: bool) -> Self {
        Self {
            session_tx,
            statistics_tx: st_tx,
            invalidate_tx: None,
            next_log_id: 0,
            session_id,
            session_started: false,
            all_log: Arc::new(Mutex::new(Vec::with_capacity(2000))),
            process_cache: if resolve_process_needed {
                Some(HashMap::with_capacity(100))
            } else {
                None
            },

            statistics_start_log_id: 0,
            session_start_time: None,
        }
    }

    /// Registers this session's buffer in the global map — weakly, so a
    /// session dropped by the UI does not keep its logs alive.
    fn register_global_log_map(&self, global_log: &mut HashMap<i32, WeakAllLog>) {
        global_log.insert(self.session_id, Arc::downgrade(&self.all_log));
    }

    /// Leaves the current buffer to the old session's consumers and keeps
    /// collecting into a fresh clone of it under `new_session_id`.
    fn detach_and_start_new_session(&mut self, new_session_id: i32) {
        let new_all_log;
        {
            let all_log_locked = self.all_log.lock().unwrap();
            let cost = Instant::now();
            // Shallow clone: a Vec of Arc<Log>, so only refcounts are bumped.
            new_all_log = Arc::new(Mutex::new(all_log_locked.clone()));
            log::info!("clone all raw log cost: {:?} size: {}", cost.elapsed(), all_log_locked.len());
        }
        self.all_log = new_all_log;
        self.session_started = false;
        self.statistics_start_log_id = 0;
        self.session_id = new_session_id;
    }

    /// Parses and stores one raw line: announces the session on first use,
    /// drains the oldest entries when the buffer outgrows `MAXIMUM_LOG_SIZE`,
    /// then notifies watchers that the buffer changed.
    async fn handle_new_log(&mut self, log: String) {
        if !self.session_started {
            // Lazily announce the session with its buffer + watch channel.
            let (tx, rx) = tokio::sync::watch::channel(());
            if let Err(err) = self.session_tx.send(SessionEvent {
                session_id: self.session_id,
                meta_data: SessionMetaData::NewSession(self.all_log.clone(), tx.clone(), rx),
            }).await {
                log::error!("send session error?! {}", err);
            }
            self.invalidate_tx = Some(tx);
            self.session_started = true;
            self.session_start_time = Some(Instant::now());
        }

        let new_log = new_log(self.next_log_id, log, &mut self.process_cache).await;
        {
            let mut log_collection = self.all_log.lock().unwrap();
            log_collection.push(new_log);
            self.next_log_id += 1;
            // FIX: the old `curr_len` was re-assigned after the drain but
            // never read again; it is now scoped to this block only.
            let curr_len = log_collection.len();
            if curr_len > MAXIMUM_LOG_SIZE {
                // Keep only the newest DRAIN_LOG_SIZE entries.
                log_collection.drain(0..curr_len - DRAIN_LOG_SIZE);
                log::info!("drain raw log from {} to {}", curr_len, log_collection.len());
                // Refresh usage statistics right after the drain.
                self.statistics_tx.send(()).unwrap();
            }
        }

        if let Err(err) = self.invalidate_tx.as_ref().unwrap().send(()) {
            log::error!("invalidate_tx send err {err}");
        }
    }

    /// Publishes a buffer-usage event if the session is live and a rate
    /// estimate is available.
    async fn publish_statistics_if_needed(&mut self) {
        if self.session_started {
            if let Some(statistics) = self.run_statistics() {
                if let Err(err) = self.session_tx.send(SessionEvent {
                    session_id: self.session_id,
                    meta_data: statistics}).await {
                    log::error!("send usage percentage from session {} error {}?!", self.session_id, err);
                }
            }
        }
    }

    /// Computes buffer usage and an ETA (seconds until the buffer is full)
    /// from the log rate over roughly the last minute. Returns None when no
    /// reliable rate can be derived.
    fn run_statistics(&mut self) -> Option<SessionMetaData> {
        let now = Instant::now();
        let all_log_locked = self.all_log.lock().unwrap();
        let fraction = all_log_locked.len() as f32 / MAXIMUM_LOG_SIZE as f32;
        let first_log_id = all_log_locked.first().map_or(0, |log| log.id);
        let st_log_index = self.statistics_start_log_id.saturating_sub(first_log_id);
        // Scan forward from the previous mark for the first log that is at
        // most one minute old.
        if let Some(mark_log) = all_log_locked.iter()
            .skip(st_log_index)
            .find(|log| {
                now.duration_since(log.st_born_time) <= Duration::from_secs(60)
            }) {
            if self.statistics_start_log_id != mark_log.id {
                log::info!("statistics id update from {} to {}", self.statistics_start_log_id, mark_log.id);
                self.statistics_start_log_id = mark_log.id;
            }
            let last_log = all_log_locked.last().expect("mark log exists but miss last one?");
            let log_count_in_one_st_period: u32 = (last_log.id - mark_log.id) as u32;
            let sec_in_one_st_period: u32 = (last_log.st_born_time - mark_log.st_born_time).as_secs() as u32;
            if sec_in_one_st_period > 1 {
                let alps = log_count_in_one_st_period / sec_in_one_st_period;
                // BUGFIX: `alps` rounds down to 0 below one log/second, which
                // previously made the ETA division panic with divide-by-zero.
                if alps > 0 {
                    let log_eta_sec = MAXIMUM_LOG_SIZE.saturating_sub(all_log_locked.len()) as u32 / alps;
                    return Some(SessionMetaData::BufferUsageStatistics(fraction, log_eta_sec));
                }
            }
        }
        None
    }
}

/// Where log lines come from: a spawned `adb logcat` child process, or an
/// in-memory fake stream for `-test` mode (built lazily on first read).
enum RemoteDevice {
    RealAdbDevice {
        process: Child,
        line_reader: Lines<BufReader<ChildStdout>>,
    },
    FakeDevice(Option<Pin<Box<dyn Stream<Item=String>>>>)
}

/// A log source paired with the publisher that forwards its lines to the UI.
struct RemoteProcess<'a, 'b> {
    remote: RemoteDevice,
    publisher: LogPublisher<'a, 'b>,
}

impl<'a, 'b> RemoteProcess<'a, 'b> {
    /// Wraps `remote` in a new publisher; process-name resolution is only
    /// enabled for the real adb device (the fake stream has no processes).
    fn new(remote: RemoteDevice, session_id: i32, session_tx: &'a MpscSender<SessionEvent>, st_tx: &'b UbMpscSender<()>) -> Self {
        let need_resolve_process = match &remote {
            RemoteDevice::RealAdbDevice { .. } => { true }
            RemoteDevice::FakeDevice { .. } => { false }
        };
        Self {
            remote,
            publisher: LogPublisher::new(session_tx, st_tx, session_id, need_resolve_process),
        }
    }

    /// Weakly registers the current session's buffer in the global map.
    fn register_global_log_map(&self, global_log: &mut HashMap<i32, WeakAllLog>) {
        self.publisher.register_global_log_map(global_log);
    }

    /// Detaches the buffer for the old session and continues collecting
    /// under `new_session_id` (delegates to the publisher).
    fn detach_and_start_new_session(&mut self, new_session_id: i32) {
        self.publisher.detach_and_start_new_session(new_session_id);
    }

    /// Waits for the adb child to exit; never resolves for the fake device.
    async fn wait_for_exit(&mut self) -> io::Result<ExitStatus> {
        match &mut self.remote {
            RemoteDevice::RealAdbDevice { process, .. } => {
                process.wait().await
            }
            RemoteDevice::FakeDevice { .. } => {
                // A fake device never "exits"; park forever.
                pending().await
            }
        }
    }

    /// Reads and publishes the next log line, returning false at
    /// end-of-stream (adb stdout closed). The outer `loop` only repeats
    /// after a read error on the real device, so a transient bad line is
    /// skipped rather than treated as EOF.
    async fn handle_next_line(&mut self) -> bool {
        loop {
            match &mut self.remote {
                RemoteDevice::RealAdbDevice { line_reader , ..} => {
                    match line_reader.next_line().await {
                        Ok(line_opt) => return match line_opt {
                            None => { false }
                            Some(line) => {
                                self.publisher.handle_new_log(line).await;
                                true
                            }
                        },
                        Err(err) => {
                            log::error!("invalid log line read {err}");
                        }
                    }
                }
                RemoteDevice::FakeDevice( fake_stream ) => {
                    // Lazily build an endless stream of random TEST_LOG lines
                    // with 0-49 ms of delay between emissions.
                    if fake_stream.is_none() {
                        *fake_stream = Some(Box::pin(async_stream::stream! {
                            let mut random = thread_rng();
                            loop {
                                let rand_index = random.gen_range(0..TEST_LOG.len());
                                let sleep_duration = random.gen_range(0..50);
                                if let Some(string) = TEST_LOG.get(rand_index) {
                                    yield string.to_string();
                                }
                                tokio::time::sleep(Duration::from_millis(sleep_duration)).await;
                            }
                        }));
                    }
                    let element = fake_stream.as_mut().unwrap().next().await;
                    return if let Some(line) = element {
                        self.publisher.handle_new_log(line).await;
                        true
                    } else {
                        false
                    }
                }
            }
        }
    }
}

/// Builds the log source for a session: a fake stream when `output_test` is
/// set, otherwise a spawned `adb logcat` child with a buffered line reader
/// on its piped stdout.
fn new_adb_process<'a, 'b>(session_id: i32, session_tx: &'a MpscSender<SessionEvent>, output_test: bool, st_tx: &'b UbMpscSender<()>) -> io::Result<RemoteProcess<'a, 'b>> {
    let remote = if output_test {
        // Test mode: synthesize logs locally instead of talking to a device.
        RemoteDevice::FakeDevice(None)
    } else {
        let mut process = Command::new("adb").arg("logcat").stdout(Stdio::piped()).spawn()?;
        let stdout = process.stdout.take().unwrap();
        RemoteDevice::RealAdbDevice {
            process,
            line_reader: BufReader::new(stdout).lines(),
        }
    };
    Ok(RemoteProcess::new(remote, session_id, session_tx, st_tx))
}

/// Body of the dedicated "raw_collector" thread: a single-threaded tokio
/// runtime multiplexing (1) log lines from the device, (2) a 1 Hz statistics
/// tick, and (3) control events from the UI.
#[tokio::main(flavor = "current_thread")]
async fn collect_log(mut ctl_rx: RawCtlReceiver, session_tx: MpscSender<SessionEvent>) -> io::Result<()> {
    // `-test` on the command line swaps the real adb device for a fake stream.
    let output_test = std::env::args().any(|arg| {
        arg == "-test"
    });
    let mut all_log_map: HashMap<i32, WeakAllLog> = HashMap::new();
    let mut next_session_id = 0;
    // Carries the still-running remote process across a StartNewSession restart.
    let mut process_keep_use: Option<io::Result<RemoteProcess>> = None;
    let (st_tx, mut st_rx) = tokio::sync::mpsc::unbounded_channel();
    let st_tx_from_drain_use = st_tx.clone();
    // 1 Hz ticker driving periodic statistics publication.
    tokio::spawn(async move {
        loop {
            tokio::time::sleep(Duration::from_secs(1)).await;
            st_tx.send(()).unwrap();
        }
    });
    loop {
        // Reuse the detached process after StartNewSession; otherwise spawn
        // a fresh adb for a new session id.
        let mut remote_process = process_keep_use.take().unwrap_or_else(|| {
            let adb = new_adb_process(next_session_id, &session_tx, output_test, &st_tx_from_drain_use);
            next_session_id += 1;
            adb
        })?;
        remote_process.register_global_log_map(&mut all_log_map);
        loop {
            select! {
                // Statistics tick (or post-drain nudge).
                _ = st_rx.recv() => {
                    remote_process.publisher.publish_statistics_if_needed().await;
                }
                // Control events from the UI.
                event_from_ui = ctl_rx.recv() => {
                    if let Some(event) = event_from_ui {
                        match event {
                            RawEventFromUI::DrainLog{ session_id, least_id } => {
                                if let Some(weak_log) = all_log_map.get(&session_id) {
                                    // NOTE(review): curr_len is assigned below
                                    // but never read afterwards — dead-code
                                    // candidate.
                                    let curr_len;
                                    if let Some(all_log) = weak_log.upgrade() {
                                        let mut log_collection = all_log.lock().unwrap();
                                        if let Some(first_log) = log_collection.first() {
                                            let first_log_id = first_log.id;
                                            if least_id > first_log_id {
                                                // Ids increase by one per stored log, so this
                                                // removes every log with id < least_id.
                                                // NOTE(review): assumes least_id <= last id + 1;
                                                // a larger value would panic in drain — confirm
                                                // the UI guarantees this.
                                                let drain_count = least_id - first_log_id;
                                                let old_size = log_collection.len();
                                                log_collection.drain(0..drain_count);
                                                let new_size = log_collection.len();
                                                if session_id == remote_process.publisher.session_id {
                                                    // Live session shrank: refresh its statistics.
                                                    remote_process.publisher.statistics_tx.send(()).unwrap();
                                                }
                                                log::info!("drain raw log from session {session_id} by least {least_id} from user size {old_size} -> {new_size}");
                                            } else {
                                                log::debug!("drain raw log abort by invalid least {least_id} first id {first_log_id}");
                                            }
                                        }
                                        curr_len = log_collection.len();
                                    } else {
                                        log::error!("impossible situation, drain request from a dropped session {session_id}?!");
                                        curr_len = 0;
                                    }
                                } else {
                                    log::warn!("drain request from a invalid session {session_id}");
                                }
                            }
                            RawEventFromUI::StartNewSession => {
                                // Keep the adb process alive; only the session changes.
                                remote_process.detach_and_start_new_session(next_session_id);
                                process_keep_use = Some(Ok(remote_process));
                                next_session_id += 1;
                                break;
                            }
                        }
                    } else {
                        log::error!("ctl tx is dropped by ui??");
                    }
                }
                // Next log line from the device (or fake stream).
                has_more = remote_process.handle_next_line() => {
                    if !has_more {
                        // EOF: reap the child, back off briefly, then respawn
                        // adb via the outer loop.
                        let wait_status = remote_process.wait_for_exit().await;
                        if let Ok(exit_status) = wait_status {
                            log::warn!("adb logcat process is shutdown exit code is {exit_status}");
                            tokio::time::sleep(Duration::from_millis(500)).await;
                            break;
                        } else if let Err(err) = wait_status {
                            log::warn!("adb logcat process is shutdown unexpectedly {err}");
                        }
                    }
                }
            }
        }
    }
}

/// Event delivered to the UI, tagged with the originating session.
#[derive(Clone)]
pub struct SessionEvent {
    pub session_id: i32,
    pub meta_data: SessionMetaData,
}

/// Payload of a `SessionEvent`.
#[derive(Clone)]
pub enum SessionMetaData {
    /// Buffer usage fraction plus estimated seconds until the buffer fills.
    BufferUsageStatistics(f32/*usage*/, u32/*ETA sec*/),
    /// A new session started: its log buffer and the invalidation watch pair.
    NewSession(AllLog, Sender<()>, Receiver<()>),
}

/// Control messages the UI sends to the collector thread.
pub enum RawEventFromUI {
    /// Drop every log with id below `least_id` from session `session_id`.
    DrainLog {
        session_id: i32,
        least_id: usize,
    },
    /// Detach the current session's buffer and start a new session that
    /// reuses the same device process.
    StartNewSession
}

/// Spawns the dedicated collector thread and returns the channel pair the UI
/// uses to talk to it: a control sender and a session-event receiver.
pub fn start_collect_log() -> (RawCtlSender, MpscReceiver<SessionEvent>) {
    let (session_tx, session_rx) = tokio::sync::mpsc::channel(5);
    let (ctl_tx, ctl_rx) = tokio::sync::mpsc::unbounded_channel();
    let builder = thread::Builder::new().name("raw_collector".to_string());
    builder
        .spawn(move || {
            // The thread runs its own current-thread tokio runtime inside.
            let _ = collect_log(ctl_rx, session_tx);
        })
        .unwrap();
    (ctl_tx, session_rx)
}