import numpy as np
from numpy.random import beta
import matplotlib.pyplot as plt
from scipy import linalg

# Matplotlib configuration so Chinese (CJK) text renders correctly.
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']  # font that can display Chinese labels
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with CJK fonts


class Bandit(object):
    """Base class for a Bernoulli multi-armed bandit simulation.

    Each arm pays reward 1 with the probability given in ``arm_priority``.
    Subclasses implement :meth:`select` with a concrete selection strategy;
    :meth:`simulate` runs one select/pull/update round and tracks regret.
    """

    def __init__(self, arm_priority):
        self._cumulative_regret_list = []  # cumulative regret recorded after each pull
        self._cumulative_regret = 0  # running total of regret
        self._priority = arm_priority  # success probability of each arm
        self._best = np.max(self._priority)  # expected reward of the best arm
        self._win_of_arms = np.zeros(len(arm_priority))  # reward==1 counts per arm
        self._loss_of_arms = np.zeros(len(arm_priority))  # reward==0 counts per arm

    def pull(self, arm):
        """Pull *arm* once; return 1 with probability ``priority[arm]``, else 0."""
        return int(np.random.rand() < self._priority[arm])

    def select(self):
        """Return the index of the arm to pull next (strategy-specific)."""
        # NotImplementedError is the idiomatic way to mark an abstract hook.
        raise NotImplementedError('select() must be implemented by a subclass')

    def update(self, arm, reward):
        """Record the outcome of one pull of *arm* and accumulate regret."""
        self._win_of_arms[arm] += reward
        self._loss_of_arms[arm] += 1 - reward
        # Regret = expected reward of the best arm minus that of the chosen arm.
        regret = self._best - self._priority[arm]
        self._cumulative_regret += regret
        self._cumulative_regret_list.append(
            self._cumulative_regret)

    def simulate(self):
        """Run one round: select an arm, pull it, update the statistics."""
        arm = self.select()
        reward = self.pull(arm)
        self.update(arm, reward)

    @property
    def cumulative_regret(self):
        """Total regret accumulated so far."""
        return self._cumulative_regret

    @property
    def cumulative_regret_list(self):
        """Cumulative regret after each pull, in pull order."""
        return self._cumulative_regret_list


class ThompsonSamplingBandit(Bandit):
    """Thompson sampling: draw from each arm's Beta posterior, play the max."""

    def __init__(self, arm_priority):
        super().__init__(arm_priority)

    def select(self):
        # Beta(1 + wins, 1 + losses) is the posterior under a uniform prior.
        samples = beta(1 + self._win_of_arms, 1 + self._loss_of_arms)
        return np.argmax(samples)


class UCBBandit(Bandit):
    """UCB1 strategy: pick the arm maximizing mean reward + exploration bonus."""

    def __init__(self, arm_priority):
        Bandit.__init__(self, arm_priority)
        self._trials = 0  # total number of pulls across all arms
        self._avg_reward = np.zeros(len(arm_priority))  # empirical mean reward per arm

    def select(self):
        trial_of_arms = self._win_of_arms + self._loss_of_arms
        # The +1 terms keep log/division defined before an arm has been tried.
        bonus = np.sqrt(2 * np.log(1 + self._trials) / (1 + trial_of_arms))
        # BUG FIX: compute the score in a fresh array. The original aliased
        # self._avg_reward and used in-place `+=`, permanently adding the
        # exploration bonus to the stored averages on every select() call.
        return np.argmax(self._avg_reward + bonus)

    def update(self, arm, reward):
        Bandit.update(self, arm, reward)
        self._trials += 1
        trials_of_arm = self._win_of_arms[arm] + self._loss_of_arms[arm]
        # BUG FIX: the empirical mean is simply wins / pulls. The original
        # plugged the *cumulative* win count into an incremental-mean formula
        # (((n-1)*avg + wins)/n), which overestimates the average
        # (e.g. two wins in two pulls yielded 1.5 instead of 1.0).
        self._avg_reward[arm] = self._win_of_arms[arm] / trials_of_arm


class EpsilonGreedyBandit(Bandit):
    """Epsilon-greedy: explore with probability epsilon, otherwise exploit."""

    def __init__(self, arm_priority, epsilon, min_trials=0):
        super().__init__(arm_priority)
        self._epsilon = epsilon  # probability of a random (exploration) pull
        self._avg_reward = np.zeros(len(arm_priority))  # empirical mean per arm
        self._trials = 0  # total pulls so far
        self._min_trials = min_trials  # forced exploration for the first pulls

    def select(self):
        explore = (np.random.rand() < self._epsilon
                   or self._trials < self._min_trials)
        if explore:
            # Uniform random arm while exploring (or warming up).
            return np.random.choice(range(len(self._win_of_arms)))
        # Exploit: play the arm with the best empirical mean reward.
        return np.argmax(self._avg_reward)

    def update(self, arm, reward):
        super().update(arm, reward)
        self._trials += 1
        pulls_of_arm = self._win_of_arms[arm] + self._loss_of_arms[arm]
        self._avg_reward[arm] = self._win_of_arms[arm] / pulls_of_arm


class LinUCB(object):
    """Linear UCB contextual bandit: one ridge-regression model per arm.

    Maintains, for each arm, A = I + sum(x x^T) and b = sum(r x) so that
    theta = A^-1 b; select() scores arms by x^T theta plus an
    alpha-scaled confidence bound.
    """

    def __init__(self, alpha=0.25,
                 r1=0.8, r0=0,
                 d=2, arms=None):
        """
        alpha -- width multiplier of the confidence bound
        r1/r0 -- reward values credited for a win / a loss
        d     -- dimension of the context feature vector
        arms  -- per-arm success probabilities (default: no arms)
        """
        self._alpha = alpha
        self._r1 = r1
        self._r0 = r0
        self._d = d
        # BUG FIX: a mutable default argument (arms=[]) is shared across all
        # instances constructed with the default; use a None sentinel instead.
        self._arms = [] if arms is None else arms
        self._Aa = []     # per-arm A = I + sum(x x^T)
        self._AaI = []    # per-arm A^-1
        self._ba = []     # per-arm b = sum(r * x)
        self._theta = []  # per-arm weights theta = A^-1 b
        for _ in range(len(self._arms)):
            self._Aa.append(np.identity(d))
            self._ba.append(np.zeros((d, 1)))
            self._AaI.append(np.identity(d))
            self._theta.append(np.zeros((d, 1)))
        self._x = None   # context (d x 1) of the most recent select()
        self._xT = None  # its transpose (1 x d)

    def pull(self, arm):
        """Pull *arm*; Bernoulli reward with probability ``self._arms[arm]``."""
        return int(np.random.rand() < self._arms[arm])

    def select(self, user_feature=None):
        """Return the arm maximizing expected reward + confidence bound."""
        # BUG FIX: `if not user_feature` raises on numpy arrays (ambiguous
        # truth value) and misfires on empty sequences; test against None.
        if user_feature is None:
            user_feature = np.identity(self._d)
        # context feature as a row (1 x d) and a column (d x 1)
        xaT = np.array([user_feature])
        xa = np.transpose(xaT)
        arm_count = len(self._arms)
        expected_reward = np.array([np.dot(xaT, self._theta[arm])
                                    for arm in range(arm_count)])
        bound = np.array([self._alpha * np.sqrt(np.dot(np.dot(xaT,
                                                              self._AaI[arm]),
                                                       xa))
                          for arm in range(arm_count)])
        confidence_bound = expected_reward + bound
        selected_arm = np.argmax(confidence_bound)

        # Remember the context so update() can attribute the reward to it.
        self._x = xa
        self._xT = xaT
        return selected_arm

    def update(self, arm, reward):
        """Fold the observed reward for *arm* into its linear model."""
        r = self._r1 if reward == 1 else self._r0
        self._Aa[arm] += np.dot(self._x, self._xT)
        self._ba[arm] += r * self._x
        # linalg.inv is the direct form of solve(A, I).
        self._AaI[arm] = linalg.inv(self._Aa[arm])
        self._theta[arm] = np.dot(self._AaI[arm], self._ba[arm])

    def simulate(self, user):
        """Run one contextual round for *user*; return the chosen arm."""
        arm = self.select(user)
        reward = self.pull(arm)
        self.update(arm, reward)
        return arm


if __name__ == '__main__':
    # Compare the cumulative regret of several bandit strategies on the
    # same three-armed Bernoulli problem.
    priority = [0.15, 0.20, 0.42]

    # (curve label, strategy instance, line width, line style) per experiment.
    experiments = [
        ("汤普森采样算法", ThompsonSamplingBandit(priority), 2, 'solid'),
        ("UCB算法", UCBBandit(priority), 2, '--'),
        ("完全贪婪", EpsilonGreedyBandit(priority, 0), 1, '-.'),
        ("epsilon(5%)", EpsilonGreedyBandit(priority, 0.05), 2, 'dotted'),
        ("epsilon(10%)", EpsilonGreedyBandit(priority, 0.1), 2, 'dashdot'),
        ("epsilon(20%)", EpsilonGreedyBandit(priority, 0.2), 1, 'dashed'),
        ("完全随机", EpsilonGreedyBandit(priority, 1), 1, 'dashdot'),
    ]

    t = 100
    # Keep the per-round call order identical to the experiments order so
    # the shared RNG stream is consumed the same way for each strategy.
    for _ in range(t):
        for _, bandit, _, _ in experiments:
            bandit.simulate()

    handles = []
    labels = []
    for name, bandit, width, style in experiments:
        curve, = plt.plot(range(t), bandit.cumulative_regret_list,
                          linewidth=width, linestyle=style)
        handles.append(curve)
        labels.append(name)

    plt.ylabel('累计遗憾')
    plt.xlabel('尝试次数')
    plt.legend(handles=handles, labels=labels, loc='best')
    plt.show()
