# coding:utf-8
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
from scipy import integrate
import pandas as pd
import pickle
from datetime import datetime


class Count:
    """Estimate COUNT aggregates over a 1-D attribute from a kernel
    density estimate (KDE) instead of scanning the underlying table.

    For each query point the estimated count is the integral of the KDE
    over a window around that point, scaled by the table cardinality.
    """

    def __init__(self, kde, x):
        # kde: fitted density model exposing score_samples() returning
        #      log-densities (e.g. sklearn.neighbors.KernelDensity).
        # x:   1-D numpy array of query points to estimate counts for.
        self.kde = kde
        self.x = x

    def count(self, truth_csv="../data/count1g.csv", window=10000.0,
              scale=1500000.0):
        """Estimate a count for every value in ``self.x``, print timing
        and the average relative error against a ground-truth CSV.

        Parameters
        ----------
        truth_csv : str
            Path to a CSV with a ``cnt`` column of true counts
            (default keeps the original hard-coded location).
        window : float
            Half-width of the integration interval around each point
            (default keeps the original 10000).
        scale : float
            Table cardinality used to turn probability mass into a
            count (default keeps the original 1500000).

        Returns
        -------
        numpy.ndarray
            The estimated count for each entry of ``self.x``.
        """
        result_list = []

        start = datetime.now()

        total_min = self.x.min()
        total_max = self.x.max()

        def f_p(*args):
            # score_samples returns the log-density; exponentiate to get
            # the density, and take [0] so quad receives a plain scalar
            # (implicit size-1 ndarray -> float conversion is deprecated).
            point = np.array(args).reshape(1, -1)
            return float(np.exp(self.kde.score_samples(point))[0])

        ans = 0.0
        # Heuristic cache: re-run the (expensive) quadrature only every
        # `con_time` points and reuse the previous answer in between.
        con_time = 1
        # BUG FIX: start `count` above `con_time` so the very first point
        # triggers a real computation. The original started at 0 and
        # appended the initial ans == 0.0 for the first two points, which
        # inflated the reported relative error.
        count = 2
        for data in self.x:
            # Clamp the integration window to the data's observed range.
            x_min = max(float(data) - window, total_min)
            x_max = min(float(data) + window, total_max)

            if count > con_time:
                ans = integrate.quad(f_p, x_min, x_max,
                                     epsabs=10.0, epsrel=0.1)[0] * scale
                # Recompute roughly once per order of magnitude of the
                # answer: bigger counts -> longer reuse of the cache.
                tmp = len(str(int(ans))) - 1
                con_time = pow(10, tmp)
                count = 2
            else:
                count += 1

            result_list.append(ans)

        time_cost = (datetime.now() - start).total_seconds()
        print("time cost {}".format(time_cost))

        result_list = np.array(result_list)
        print("now we calculate avg relative error")
        standard_result = pd.read_csv(truth_csv)['cnt'].values
        relative_error = (abs(standard_result - result_list)
                          / standard_result).mean() * 100
        print("relative error is: {}%".format(relative_error))

        return result_list
