/**
 * @file    cpu_usage.c
 * @author  liudechao email:liudechao@topband.com.cn
 * @version V0.1.0
 * @date    2024-04-14
 * @brief   CPU利用率与线程利用率
 * @note    
 * @see     cpu_usage
 * @verbatim 
 * @endverbatim 
 * @since   
 * 版本|修改人|修改日期|修改内容
 * ------|----|------|--------
 * V0.1.0|liudechao|2024-04-14|创建源码
 * @copyright 
 *  Topband 私有代码
 * <h2><center>&copy;Topband Copyright (c) 2024 liudechao.
 * All rights reserved.</center></h2>
 * 
 */
#include "cpu_usage.h"
#include "board.h"
#ifndef CPU_USAGE_PERIOD_TICK
#define CPU_USAGE_PERIOD_TICK CPU_CALCULATED_RATE
#endif

static struct rt_work g_cpu_work;
thread_runtime_trace_def thread_runtime_trace = {0};
thread_run_info_def thread_run_info[RT_THREAD_PRIORITY_MAX] = {0};

/**
 * @brief  计算线程执行时间
 * @param  from: 当前线程
 * @param  to: 目标线程
 * @pre    注册相应的钩子函数
 * @post   
 * @note   
 * @attention
 * @see    
 * @warning
 */
static void thread_stats_scheduler_hook(struct rt_thread *from, struct rt_thread *to)
{
	Tick_TypeDef *p_time = (Tick_TypeDef *)from->user_data;
	
	static Tick_TypeDef schedule_last_time = {0};
	
    Tick_TypeDef current_time = rt_tick_get_all();
    
	Tick_TypeDef calculate_time = rt_tick_interval_get(schedule_last_time, current_time);
	
	p_time->u32_ms += calculate_time.u32_ms;
	p_time->u32_us += calculate_time.u32_us;
	p_time->u32_ns += calculate_time.u32_ns;
	
	rt_memcpy(&schedule_last_time, &current_time, sizeof(Tick_TypeDef));
}


cpu_usage_t *cpu_usage_obj(void)
{
    static struct cpu_usage _usage;
    return &_usage;
}

static void timeout(void *param)
{
    cpu_usage_t *obj = param;

    if (obj->state == CPU_USAGE_STATE_ACTIVATED)
    {
        if (++obj->period == CPU_USAGE_PERIOD_TICK)
        {
            obj->period = 0;
            obj->idle_stat[0].load = CPU_USAGE_PERIOD_TICK - 
                (obj->idle_stat[0].idle_tick - obj->idle_stat[0].last_tick);
            obj->idle_stat[0].last_tick = obj->idle_stat[0].idle_tick;
        }

        if (rt_thread_self() == obj->idle_stat[0].tid)
        {
            obj->idle_stat[0].idle_tick++;
        }
    }
}

/**
 * @brief  Stop and detach the sampling timer and wipe all statistics.
 * @note   A no-op when the module was never initialized (the object type
 *         check fails on a zeroed timer).
 */
void cpu_usage_deinit(void)
{
    cpu_usage_t *obj = cpu_usage_obj();
    rt_timer_t timer = &obj->time;

    if (rt_object_get_type(&timer->parent) != RT_Object_Class_Timer)
    {
        return;
    }

    rt_timer_stop(timer);
    rt_timer_detach(timer);
    rt_memset(obj, 0, sizeof(*obj));
}

/**
 * @brief  Pause CPU usage sampling, recording the tick at which it stopped.
 * @note   Only effective while the sampler is in the ACTIVATED state.
 */
void cpu_usage_suspend(void)
{
    cpu_usage_t *obj = cpu_usage_obj();

    if (obj->state != CPU_USAGE_STATE_ACTIVATED)
    {
        return;
    }

    rt_timer_stop(&obj->time);
    obj->state = CPU_USAGE_STATE_SUSPEND;
    obj->suspend_tick = rt_tick_get();
}

/**
 * @brief  Resume CPU usage sampling after cpu_usage_suspend().
 * @note   The suspended interval is credited as idle time on every core so
 *         the next window's load figure is not artificially inflated.
 */
void cpu_usage_resume(void)
{
    cpu_usage_t *obj = cpu_usage_obj();
    rt_tick_t elapsed;
    int core;

    if (obj->state != CPU_USAGE_STATE_SUSPEND)
    {
        return;
    }

    elapsed = rt_tick_get() - obj->suspend_tick;
    for (core = 0; core < obj->cpus; core++)
    {
        obj->idle_stat[core].idle_tick += elapsed;
    }

    obj->state = CPU_USAGE_STATE_ACTIVATED;
    rt_timer_start(&obj->time);
}

/**
 * @brief  Average CPU load over the last completed sampling window.
 * @return load in percent across all tracked cores; 0.0 when the sampler
 *         is not currently active
 */
float cpu_load_average(void)
{
    cpu_usage_t *obj = cpu_usage_obj();
    float load = 0.0;

    if (obj->state == CPU_USAGE_STATE_ACTIVATED)
    {
        rt_tick_t busy = 0;
        int core;

        for (core = 0; core < obj->cpus; core++)
        {
            busy += obj->idle_stat[core].load;
        }
        load = 100.0 * busy / (CPU_USAGE_PERIOD_TICK * obj->cpus);
    }

    return load;
}



/* Initialize only after all application threads have been created
 * (original comment was mojibake; reconstructed from context). */
/**
 * @brief  Attach per-thread run-time trace storage to every existing
 *         thread and install the scheduler hook that accumulates run time.
 * @return always 0
 * @note   Each thread gets a Tick_TypeDef accumulator followed by a
 *         4-byte 0xFF guard word (later checked in cal_thread_usage()).
 * @note   Must run after every thread has been created: threads created
 *         later will have user_data == 0 and are not traced.
 */
int thread_usage_init(void)
{
#ifdef CPU_THREAD_PRINT
	uint8_t *p_data = NULL;
	struct rt_list_node *node;
	struct rt_thread *thread;
	struct rt_object_information *information;

	information = rt_object_get_information(RT_Object_Class_Thread);
	RT_ASSERT(information != RT_NULL);

	/* Walk every registered thread object. */
	for (node = information->object_list.next; node != &(information->object_list); node = node->next)
	{
		thread = (rt_thread_t)rt_list_entry(node, struct rt_object, list);

		RT_ASSERT(thread->user_data == RT_NULL);

		p_data = rt_malloc_align(sizeof(Tick_TypeDef) + 4, 4);

		RT_ASSERT(p_data != RT_NULL);

		rt_memset(p_data, 0, sizeof(Tick_TypeDef));

		rt_memset(p_data + sizeof(Tick_TypeDef), 0xff, 4);

		/* BUG FIX: original used '=' (assignment) inside RT_ASSERT instead
		 * of '==', overwriting the guard word instead of verifying it. */
		RT_ASSERT(*(uint32_t *)(p_data + sizeof(Tick_TypeDef)) == 0xFFFFFFFF);

		thread->user_data = (rt_ubase_t)p_data;
	}

	rt_scheduler_sethook(thread_stats_scheduler_hook);
#endif
	return 0;
}


void cal_thread_usage(void)
{
	float idle_percent = 1.0f;
	static Tick_TypeDef last_time = {0}; 
    Tick_TypeDef cur_time = {0}; 
    struct rt_list_node *node;
    struct rt_list_node *list;
    struct rt_thread *thread;
	uint64_t whole_time = 0;
    uint8_t i;
	
	cur_time = rt_tick_get_all();
	
	Tick_TypeDef total_time = rt_tick_interval_get(last_time, cur_time);
	
    rt_memcpy(&last_time, &cur_time, sizeof(Tick_TypeDef));
    
	struct rt_object_information *information;
    
    information = rt_object_get_information(RT_Object_Class_Thread);
    
	RT_ASSERT(information != RT_NULL);
    
	whole_time = (uint64_t)(total_time.u32_ms * 1000 + total_time.u32_us);
	for (i = 0, node  = information->object_list.next; node != &(information->object_list); node  = node->next, i++)
	{
		thread 									= (rt_thread_t)rt_list_entry(node, struct rt_object, list);
		
		/*  */
		RT_ASSERT( *(uint32_t*)(thread->user_data + sizeof(Tick_TypeDef)) = 0xFFFFFFFF);
		
		thread_run_info[i].pthread      		= thread;
		Tick_TypeDef* p_time 					= (Tick_TypeDef*)(thread->user_data);
		thread_run_info[i].run_time     		= (uint64_t)(p_time->u32_ms * 1000 + p_time->u32_us);
		thread_run_info[i].priority     		= thread->current_priority; 
		thread_run_info[i].cur_percentage		= (float)thread_run_info[i].run_time / whole_time;
		
		if(strcmp(thread_run_info[i].pthread->parent.name, "tidle0") != 0)
		{
			idle_percent -= thread_run_info[i].cur_percentage;
			if(idle_percent <= 0.0f)
			{
				idle_percent = 0.0f;
			}
		}
		
		rt_memset((void*)thread->user_data, 0, sizeof(Tick_TypeDef));
		
		rt_strncpy(thread_run_info[i].name, thread->parent.name, RT_NAME_MAX);
		
		rt_uint8_t *ptr;
        ptr = (rt_uint8_t *)thread->stack_addr;
        while (*ptr == '#')ptr ++;
        
        thread_run_info[i].max_stack = (thread->stack_size - ((rt_ubase_t) ptr - (rt_ubase_t) thread->stack_addr)) * 100
                                       / thread->stack_size;
	}
	thread_runtime_trace.thread_num = i - 1;	
	
	for (i = 0, node  = information->object_list.next; node != &(information->object_list); node  = node->next, i++)
	{
		if(strcmp(thread_run_info[i].pthread->parent.name, "tidle0") == 0)
		{
			thread_run_info[i].cur_percentage = idle_percent;
		}
		
		if(thread_run_info[i].cur_percentage > thread_run_info[i].max_percentage)
		{
			thread_run_info[i].max_percentage = thread_run_info[i].cur_percentage;
		}
	}
}

MSH_CMD_EXPORT(cal_thread_usage,cal_thread_usage);

/**
 * @brief  Print memory, CPU-load and per-thread usage statistics.
 * @param  thread_num: highest valid index into thread_run_info[]
 *                     (inclusive, as produced by cal_thread_usage())
 * @note   BUG FIX: the original format strings contained a lone '%'
 *         followed by '\r' or '\t', which is an invalid conversion
 *         specification (undefined printf behaviour); a literal percent
 *         sign must be written "%%".
 */
void thread_stats_print(uint32_t thread_num)
{
#ifdef CPU_MEM_PRINT
    rt_size_t total, used, max_used;

    rt_memory_info(&total, &used, &max_used);
	CPU_USAGE_LOG("\r\n");
    CPU_USAGE_LOG("mem_total:%u\t max_used:%u\t mem_sur:%u\t\r\n", total, max_used, total - used);
#endif
#ifdef CPU_USAGE_PRINT
	float cpu_load = cpu_load_average();

	CPU_USAGE_LOG("cpu_load_average:%d%%\r\n", (int)cpu_load);
#endif
#ifdef CPU_THREAD_PRINT
    thread_run_info_def *pthread_run_info;

    CPU_USAGE_LOG("thread      \tprio\tcur(%dms)\tmax(%dms)\tmax stack\r\n", CPU_CALCULATED_RATE, CPU_CALCULATED_RATE);

    for (uint8_t i = 0; i <= thread_num; i++)
    {
        pthread_run_info = &thread_run_info[i];

        /* xx.yy is the percentage split into integer part and two decimal
         * digits without relying on printf float support. */
        CPU_USAGE_LOG("%-8s\t%2u\t%3u.%02u %%\t%3u.%02u %%\t %2u%%\r\n", pthread_run_info->name,
                                                (uint32_t)pthread_run_info->priority,
												(uint8_t)(pthread_run_info->cur_percentage * 100),
												(uint16_t)(pthread_run_info->cur_percentage * 10000) - (uint16_t)(pthread_run_info->cur_percentage * 100) * 100,
												(uint8_t)(pthread_run_info->max_percentage * 100),
												(uint16_t)(pthread_run_info->max_percentage * 10000) - (uint16_t)(pthread_run_info->max_percentage * 100) * 100,
                                                (uint32_t)pthread_run_info->max_stack);
    }
#endif
	CPU_USAGE_LOG("\r\n");
}

/**
 * @brief  Periodic timer callback: recompute the statistics, then defer
 *         the (slow) printing to the system work queue.
 * @param  parameter: unused
 */
static void cal_timeout(void *parameter)
{
	(void)parameter;

	cal_thread_usage();
	rt_work_urgent(&g_cpu_work);
}


/**
 * @brief  Work-queue handler that prints the latest thread statistics.
 * @param  work: the queued work item (unused)
 * @param  work_data: user data supplied at rt_work_init (unused)
 */
void cpu_work_func(struct rt_work *work, void *work_data)
{
    (void)work;
    (void)work_data;

    thread_stats_print(thread_runtime_trace.thread_num);
}


/* Initialize after all threads have been created
 * (original comment was mojibake; reconstructed from context). */
int cpu_usage_init(void)
{
#ifdef CPU_USAGE_PRINT	
	cpu_usage_t *obj = cpu_usage_obj();
    rt_timer_t t = &obj->time;
    char idle_name[RT_NAME_MAX];
    int i;

    if (rt_object_get_type(&t->parent) != RT_Object_Class_Timer)
    {
        /* init time */
        rt_timer_init(t, "usage", timeout, obj, 1,
            RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_HARD_TIMER);
        /* set cpus */
        obj->cpus = sizeof(obj->idle_stat) / sizeof(obj->idle_stat[0]);
        /* get idle thread handle */
        for (i = 0; i < obj->cpus; i++)
        {
            rt_snprintf(idle_name, sizeof(idle_name), "tidle%d", i);
            obj->idle_stat[i].tid = rt_thread_find(idle_name);
        }
        /* set flags */
        obj->state = CPU_USAGE_STATE_ACTIVATED;
        /* start */
        rt_timer_start(t);
    }
#endif
	if(thread_usage_init() == 0)
	{
		rt_timer_t cal_timer = rt_timer_create("Cal_timer", cal_timeout,RT_NULL,CPU_CALCULATED_RATE, RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_HARD_TIMER);
		
		rt_work_init(&g_cpu_work, cpu_work_func, NULL);
		
		if(cal_timer != NULL)
		{
			rt_timer_start(cal_timer);
		}
		else
		{
			CPU_USAGE_LOG("rt_timer_create error \n");
			return -1;
		}
	}
	else
	{
		CPU_USAGE_LOG("thread_usage_init error \n");
		return -1;
	}

    return 0;
}

#ifdef RT_USING_COMPONENTS_INIT
//INIT_APP_EXPORT(cpu_usage_init);
#endif

