/*
 * ZFreq by CyrIng
 *
 * Copyright (C) 2014 CYRIL INGENIERIE
 * Licenses: GPL2
 */

#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

// Module metadata.
MODULE_AUTHOR ("CyrIng");
MODULE_DESCRIPTION ("ZFreq_Ring0");
// NOTE(review): MODULE_SUPPORTED_DEVICE was always a no-op and was removed
// in Linux 5.12 - confirm the target kernel still defines it.
MODULE_SUPPORTED_DEVICE ("all");
MODULE_LICENSE ("GPL");

/*
 * Read the 64-bit MSR `Reg` into `Val`.
 * RDMSR delivers the result split across EDX:EAX; reassemble the halves
 * explicitly instead of using the "=A" constraint, which on x86-64 may
 * allocate a 64-bit operand entirely in RAX or RDX and read garbage.
 * `Val` may be an 8-byte bit-field struct, so the qword is transferred
 * through a union rather than a plain assignment.
 */
#define ZFreq_RDMSR(Reg, Val)						\
do {									\
	unsigned int _lo, _hi;						\
	union {								\
		__typeof__(Val) value;					\
		unsigned long long qword;				\
	} _msr;								\
	__asm__ volatile						\
	(								\
		"rdmsr ;"						\
		: "=a" (_lo),						\
		  "=d" (_hi)						\
		: "c" (Reg)						\
	);								\
	_msr.qword = ((unsigned long long) _hi << 32) | _lo;		\
	(Val) = _msr.value;						\
} while (0)

/*
 * Write the 64-bit value `Val` into the MSR `Reg`.
 * WRMSR consumes the value as EDX:EAX; feed the two halves explicitly
 * instead of relying on the "A" constraint, which on x86-64 may place
 * a 64-bit operand in a single register (RAX or RDX), leaving the other
 * half undefined. The union lets `Val` be a bit-field struct.
 */
#define ZFreq_WRMSR(Reg, Val)						\
do {									\
	union {								\
		__typeof__(Val) value;					\
		unsigned long long qword;				\
	} _msr;								\
	_msr.qword = 0;							\
	_msr.value = (Val);						\
	__asm__ volatile						\
	(								\
		"wrmsr ;"						\
		:							\
		: "c" (Reg),						\
		  "a" ((unsigned int) _msr.qword),			\
		  "d" ((unsigned int) (_msr.qword >> 32))		\
	);								\
} while (0)

#include "zfreq.h"

// Character-device bookkeeping for the /dev node backing the shared area.
static struct
{
	int Major;		// major number from alloc_chrdev_region()
	struct cdev *kcdev;	// the character device itself
	dev_t nmdev, mkdev;	// allocated region base / MKDEV(Major, 0)
	struct class *clsdev;	// device class used by device_create()
} ZFreq;

// Shared processor state: vmalloc_user()'ed at init, mmap()'ed to user space.
static PROCESSOR *Proc=NULL;

/*
 * Fetch the 48-character CPU brand string from CPUID leaves
 * 0x80000002-0x80000004 and copy it into features->BrandString,
 * collapsing each run of consecutive spaces down to a single space.
 */
void ZFreq_CPUBrand(FEATURES *features)
{
	BRAND Brand;
	char raw[48+1]={0x20};
	int chunk=0, chr=0, len=0, out=0;

	// Each leaf returns 16 brand characters in EAX..EDX.
	for(chunk=0; chunk < 3; chunk++)
	{
		__asm__ volatile
		(
			"cpuid ;"
			: "=a"  (Brand.AX),
			  "=b"  (Brand.BX),
			  "=c"  (Brand.CX),
			  "=d"  (Brand.DX)
			: "a"   (0x80000002 + chunk)
		);
		for(chr=0; chr < 4; chr++, len++)
			raw[len]=Brand.AX.Chr[chr];
		for(chr=0; chr < 4; chr++, len++)
			raw[len]=Brand.BX.Chr[chr];
		for(chr=0; chr < 4; chr++, len++)
			raw[len]=Brand.CX.Chr[chr];
		for(chr=0; chr < 4; chr++, len++)
			raw[len]=Brand.DX.Chr[chr];
	}
	// Drop a space only when the next character is also a space,
	// keeping the last space of each run.
	for(out=chr=0; chr < len; chr++)
		if(!(raw[chr] == 0x20 && raw[chr+1] == 0x20))
			features->BrandString[out++]=raw[chr];
}

/*
 * Query CPUID for the standard feature flags, the logical processor
 * count, the largest extended function and - when available - the
 * extended feature flags plus the brand string.
 * Fix: CPUID clobbers EAX, EBX, ECX and EDX; every register not listed
 * as an output must appear in the clobber list, otherwise the compiler
 * may keep live values in them (undefined behavior in the original).
 */
void	ZFreq_CPUID(FEATURES *features)
{
	// Leaf 0x1: standard feature flags (all four registers captured).
	__asm__ volatile
	(
		"movq	$0x1, %%rax;"
		"cpuid;"
		: "=a"	(features->Std.AX),
		  "=b"	(features->Std.BX),
		  "=c"	(features->Std.CX),
		  "=d"	(features->Std.DX)
	);

	// Leaf 0x4, sub-leaf 0: EAX bits 31-26 hold (core count - 1).
	__asm__ volatile
	(
		"movq	$0x4, %%rax;"
		"xorq	%%rcx, %%rcx;"
		"cpuid;"
		"shr	$26, %%rax;"
		"and	$0x3f, %%rax;"
		"add	$1, %%rax;"
		: "=a"	(features->ThreadCount)
		:
		: "%rbx", "%rcx", "%rdx"
	);
	features->ThreadCount=(!features->ThreadCount) ? 1 : features->ThreadCount;

	// Leaf 0x80000000: highest supported extended function in EAX.
	__asm__ volatile
	(
		"movq	$0x80000000, %%rax;"
		"cpuid;"
		: "=a"	(features->LargestExtFunc)
		:
		: "%rbx", "%rcx", "%rdx"
	);
	// The brand string needs leaves 0x80000002-0x80000004. Accept any
	// LargestExtFunc >= 0x80000004: the original upper bound of
	// 0x80000008 wrongly skipped CPUs reporting larger leaf values.
	if(features->LargestExtFunc >= 0x80000004)
	{
		__asm__ volatile
		(
			"movq	$0x80000001, %%rax;"
			"cpuid;"
			: "=c"	(features->Ext.CX),
			  "=d"	(features->Ext.DX)
			:
			: "%rax", "%rbx"
		);
		ZFreq_CPUBrand(features);
	}
}

/*
 * Per-CPU sampling thread (one instance bound to each logical CPU by
 * ZFreq_init()). Programs the fixed performance counters, then loops
 * every Proc->idleTime ms reading cycle counters, C-state residency,
 * the TSC and the thermal sensor into the CORE slot it was given,
 * keeping a [previous, current] pair of each value for delta
 * computation by the user-space reader. On kthread_stop() the counter
 * enable bits are cleared again.
 * Returns 0 (the kthread exit code).
 */
int ZFreq_threadfn(void *data)
{
	if(data != NULL)
	{
		CORE *Core=(CORE *) data;

		// Enable the Performance Counters 1 and 2 :
		// - Set the global counter bits
		ZFreq_RDMSR(ZF_IA32_PERF_GLOBAL_CTRL, Core->GlobalPerfCounter);
		Core->GlobalPerfCounter.EN_FIXED_CTR1=1;
		Core->GlobalPerfCounter.EN_FIXED_CTR2=1;
		ZFreq_WRMSR(ZF_IA32_PERF_GLOBAL_CTRL, Core->GlobalPerfCounter);

		// - Set the fixed counter bits (count in both ring 0 and user mode)
		ZFreq_RDMSR(ZF_IA32_FIXED_CTR_CTRL, Core->FixedPerfCounter);
		Core->FixedPerfCounter.EN1_OS=1;
		Core->FixedPerfCounter.EN2_OS=1;
		Core->FixedPerfCounter.EN1_Usr=1;
		Core->FixedPerfCounter.EN2_Usr=1;

		// AnyThread States 0:per Thread , 1:per Core.
		Core->FixedPerfCounter.AnyThread_EN1=0;
		Core->FixedPerfCounter.AnyThread_EN2=0;
		ZFreq_WRMSR(ZF_IA32_FIXED_CTR_CTRL, Core->FixedPerfCounter);

		// Initial read of Unhalted Core & Reference Cycles.
		ZFreq_RDMSR(ZF_IA32_FIXED_CTR1, Core->UnhaltedCoreCycles[0]);
		ZFreq_RDMSR(ZF_IA32_FIXED_CTR2, Core->UnhaltedRefCycles[0]);

		// Initial read of C-States.
		ZFreq_RDMSR(ZF_MSR_CORE_C3_RESIDENCY, Core->RefCycles.C3[0]);
		ZFreq_RDMSR(ZF_MSR_CORE_C6_RESIDENCY, Core->RefCycles.C6[0]);

		// Initial read of TSC.
		ZFreq_RDMSR(ZF_IA32_TIME_STAMP_COUNTER, Core->TSC[0]);

		// Update the Base Operating Ratio.
		ZFreq_RDMSR(ZF_IA32_PERF_STATUS, Core->Operating);

		// Thermal Junction MAX.
		ZFreq_RDMSR(ZF_MSR_TEMPERATURE_TARGET, Core->TjMax);

		// Sample until the module exit calls kthread_stop().
		while(!kthread_should_stop())
		{
			// Update the Unhalted Core & Reference Cycles.
			ZFreq_RDMSR(ZF_IA32_FIXED_CTR1, Core->UnhaltedCoreCycles[1]);
			ZFreq_RDMSR(ZF_IA32_FIXED_CTR2, Core->UnhaltedRefCycles[1]);
			// Update C-State.
			ZFreq_RDMSR(ZF_MSR_CORE_C3_RESIDENCY, Core->RefCycles.C3[1]);
			ZFreq_RDMSR(ZF_MSR_CORE_C6_RESIDENCY, Core->RefCycles.C6[1]);
			// Update the TSC relating to the Core.
			ZFreq_RDMSR(ZF_IA32_TIME_STAMP_COUNTER, Core->TSC[1]);
			// Update the Digital Thermal Sensor.
			ZFreq_RDMSR(ZF_IA32_THERM_STATUS, Core->Therm);

			// Sampling interval; user space adjusts it through the
			// shared Proc mapping (LOOP_DEF_MS by default).
			msleep(Proc->idleTime);
			// Save Unhalted Core & Ref Cycles for next iteration.
			Core->UnhaltedCoreCycles[0] = Core->UnhaltedCoreCycles[1];
			Core->UnhaltedRefCycles[0]  = Core->UnhaltedRefCycles[1];
			// Save C-State Reference Cycles.
			Core->RefCycles.C3[0] = Core->RefCycles.C3[1];
			Core->RefCycles.C6[0] = Core->RefCycles.C6[1];
			// Save TSC.
			Core->TSC[0]=Core->TSC[1];
		}
		// Reset the fixed counters.
		Core->FixedPerfCounter.EN1_OS=0;
		Core->FixedPerfCounter.EN2_OS=0;
		Core->FixedPerfCounter.EN1_Usr=0;
		Core->FixedPerfCounter.EN2_Usr=0;
		Core->FixedPerfCounter.AnyThread_EN1=0;
		Core->FixedPerfCounter.AnyThread_EN2=0;
		ZFreq_WRMSR(ZF_IA32_FIXED_CTR_CTRL, Core->FixedPerfCounter);
		// Reset the global counters.
		Core->GlobalPerfCounter.EN_FIXED_CTR1=0;
		Core->GlobalPerfCounter.EN_FIXED_CTR2=0;
		ZFreq_WRMSR(ZF_IA32_PERF_GLOBAL_CTRL, Core->GlobalPerfCounter);
	}
	return(0);
}

static int ZFreq_mmap(struct file *filp, struct vm_area_struct *vma)
{
	if(Proc && !remap_vmalloc_range(vma, Proc, 0))
	{
		unsigned int cpu=0;
		for(cpu=0; cpu < Proc->Features.ThreadCount; cpu++)
			wake_up_process(Proc->Core[cpu].TID);
	}
	return(0);
}

static int ZFreq_release(struct inode *inode, struct file *file)
{
	if(Proc)
		Proc->idleTime=LOOP_DEF_MS;
	return(0);
}

// File operations of the shared-memory character device:
// mmap() maps the PROCESSOR area and starts sampling, open() forbids
// seeking, release() restores the default polling interval.
static struct file_operations ZFreq_fops=
{
	.mmap	= ZFreq_mmap,
	.open	= nonseekable_open,
	.release= ZFreq_release
};

/*
 * Module initialisation: allocate the shared PROCESSOR area, register
 * the character device used to mmap() it, then create one kernel
 * thread bound to each logical CPU (woken later by ZFreq_mmap()).
 * Fixes over the original: vmalloc_user(), cdev_alloc(), class_create()
 * and kthread_create() results are now checked; class_create() and
 * device_create() report failure as ERR_PTR (never NULL), so IS_ERR()
 * is used; every error path releases what was acquired before it
 * instead of leaking the cdev, the chrdev region, the class and Proc.
 */
static int __init ZFreq_init(void)
{
	unsigned int cpu=0;

	Proc=vmalloc_user(sizeof(PROCESSOR));
	if(Proc == NULL)
		return(-ENOMEM);

	ZFreq.kcdev=cdev_alloc();
	if(ZFreq.kcdev == NULL)
	{
		vfree(Proc);
		Proc=NULL;
		return(-ENOMEM);
	}
	ZFreq.kcdev->ops=&ZFreq_fops;
	ZFreq.kcdev->owner=THIS_MODULE;

	if(alloc_chrdev_region(&ZFreq.nmdev, 0, 1, SHM_FILENAME) < 0)
	{
		printk("ZFreq_init():alloc_chrdev_region():KO\n");
		goto err_cdev;
	}
	ZFreq.Major=MAJOR(ZFreq.nmdev);
	ZFreq.mkdev=MKDEV(ZFreq.Major,0);

	if(cdev_add(ZFreq.kcdev, ZFreq.mkdev, 1) < 0)
	{
		printk("ZFreq_init():cdev_add():KO\n");
		goto err_region;
	}

	ZFreq.clsdev=class_create(THIS_MODULE, SHM_DEVNAME);
	if(IS_ERR(ZFreq.clsdev))
	{
		printk("ZFreq_init():class_create():KO\n");
		goto err_region;
	}

	if(IS_ERR_OR_NULL(device_create(ZFreq.clsdev, NULL, ZFreq.mkdev, NULL, SHM_DEVNAME)))
	{
		printk("ZFreq_init():device_create():KO\n");
		goto err_class;
	}

	ZFreq_CPUID(&Proc->Features);

	printk("ZFreq:%s [%d x CPU]\n", Proc->Features.BrandString, Proc->Features.ThreadCount);

	// Base Operating Ratio.
	ZFreq_RDMSR(ZF_MSR_PLATFORM_INFO, Proc->Platform);
	// Turbo Boost Ratios.
	ZFreq_RDMSR(ZF_MSR_TURBO_RATIO_LIMIT, Proc->Turbo);

	Proc->idleTime=LOOP_DEF_MS;

	for(cpu=0; cpu < Proc->Features.ThreadCount; cpu++)
	{
		Proc->Core[cpu].cpu=cpu;

		// Threads are created stopped; ZFreq_mmap() wakes them.
		Proc->Core[cpu].TID=kthread_create(ZFreq_threadfn, &Proc->Core[cpu], "ZFreqthread%02d", Proc->Core[cpu].cpu);
		if(IS_ERR(Proc->Core[cpu].TID))
		{
			printk("ZFreq_init():kthread_create():KO\n");
			goto err_threads;
		}
		kthread_bind(Proc->Core[cpu].TID, Proc->Core[cpu].cpu);
	}
	return(0);

err_threads:
	while(cpu-- > 0)
		kthread_stop(Proc->Core[cpu].TID);
	device_destroy(ZFreq.clsdev, ZFreq.mkdev);
err_class:
	class_destroy(ZFreq.clsdev);
err_region:
	unregister_chrdev_region(ZFreq.nmdev, 1);
err_cdev:
	cdev_del(ZFreq.kcdev);
	vfree(Proc);
	Proc=NULL;
	return(-EBUSY);
}

/*
 * Module teardown: remove the character device node, its class, the
 * cdev and the device-number region, then stop every per-CPU sampling
 * thread and release the shared PROCESSOR area.
 */
static void __exit ZFreq_cleanup(void)
{
	device_destroy(ZFreq.clsdev, ZFreq.mkdev);
	class_destroy(ZFreq.clsdev);
	cdev_del(ZFreq.kcdev);
	unregister_chrdev_region(ZFreq.mkdev, 1);

	if(Proc != NULL)
	{
		unsigned int idx=0;

		while(idx < Proc->Features.ThreadCount)
			kthread_stop(Proc->Core[idx++].TID);

		vfree(Proc);
	}
}

// Register the module entry and exit points.
module_init(ZFreq_init);
module_exit(ZFreq_cleanup);
