import numpy as np
import pandas as pd
import datetime
from scipy.optimize import minimize
from scipy import linalg
from AssetAllocation.Common import *
from AssetAllocation.RiskParty import RiskBudget
from AssetAllocation.BasicOptimize import BasicOptimize
from AssetAllocation.Markowitz import Markowitz
from AssetAllocation.TestBL import *
import Core.Config as Config
from AssetAllocation.Markowitz import Calc_Portfolio_Profile

class BlackLitterman(BasicOptimize):
    """Black-Litterman asset-allocation model.

    Blends market-implied equilibrium returns (obtained by reverse
    optimization from market weights) with subjective investor views to
    produce a posterior expected-return vector and covariance matrix,
    which are then used to optimize portfolio weights against the
    investor's utility (expected return minus a risk-aversion penalty).
    """

    def __init__(self, covariance,
                 q_view_list,
                 view_confidence,
                 view_weight_matrix=None,
                 omega_market=None,
                 lambda_market=1,
                 lambda_user=6.5,
                 tau=None,
                 tau_odds=None,
                 risk_free_rate=0,
                 print_log=True):
        """
        :param covariance: n*n market covariance matrix (sigma).
        :param q_view_list: expected return per view (the Q vector).
        :param view_confidence: per-view confidence; zeros are clamped to
            0.01 so Omega stays invertible.
        :param view_weight_matrix: k*n view pick matrix P, one row per view.
        :param omega_market: market weights used to back out the implied
            equilibrium return vector Pi.
        :param lambda_market: market-side risk aversion, used in the
            reverse optimization.
        :param lambda_user: investor-side risk aversion, used in the
            forward optimization objective.
        :param tau: scalar scaling the prior (equilibrium) covariance.
            When None it is derived from tau_odds, or defaults to 0.02.
        :param tau_odds: per-view historical error rates; higher error
            rates yield a larger tau (more weight on the market prior).
        :param risk_free_rate: risk-free rate, stored for callers.
        :param print_log: when True, print the intermediate BL quantities.
        """
        super().__init__()
        # None sentinels instead of mutable default arguments (the
        # original used `=[]` defaults, shared across calls).
        if view_weight_matrix is None:
            view_weight_matrix = []
        if omega_market is None:
            omega_market = []

        self.num_assets = covariance.shape[0]
        self.risk_free_rate = risk_free_rate

        # Market-side parameters.
        self.lambda_market = lambda_market  # used when reverse-optimizing Pi
        self.lambda_user = lambda_user      # used in the utility objective
        self.sigma_mkt_cov = covariance
        self.omega_market = omega_market    # market weights -> equilibrium returns

        # View-side parameters.
        self.view_weights = np.array(view_weight_matrix)
        self.q_view_list = q_view_list  # view means (Q)
        # Omega: diagonal covariance matrix of the view errors.
        self.omega_view_uncertainty = self.calc_confidence_to_certainty(view_confidence)

        # Resolve tau: an explicit value wins; otherwise derive it from
        # tau_odds; otherwise fall back to 0.02.  Comparison is
        # `is not None` (not truthiness) so an explicit tau of 0 is
        # honored -- the original `if tau:` silently replaced 0 with 0.02.
        if tau is not None:
            self.tau = tau
        elif tau_odds:
            self.tau = self.calc_tao(view_confidence, tau_odds)
        else:
            self.tau = 0.02

        # Reverse optimization: implied equilibrium return vector Pi.
        # The prior distribution is N(Pi, tau * sigma_mkt_cov).
        self.pi = self.calc_pi_reverse_reverse_optimize()

        # Posterior (combined) return distribution.
        self.bl_expected_return, self.bl_sigma = self.get_new_combined_return_dist()

        if print_log:
            print("")
            print("Tao", self.tau)
            print("")
            print("Pi-Equilibrium Return")
            print(self.pi)
            print("")
            print("BL-Expected Return (ER)")
            print(self.bl_expected_return)
            print("")
            print("BL-Covariance (Sigma)")
            print(self.bl_sigma)

    def set_contraints(self):
        # Intentionally empty: constraint handling is done via the base
        # class (see _process_constraints in optimize()).
        # NOTE(review): misspelled name ("contraints") kept as-is -- it
        # likely overrides a base-class hook; renaming would break callers.
        pass

    # tau from view error rates (Idzorek 2002-style heuristic).
    def calc_tao(self, confidence_list, tao_odds):
        """Derive tau from historical view error rates and confidences.

        A higher average error rate yields a larger tau, i.e. more weight
        on the market covariance relative to the views.
        """
        tau_odds_mean = np.mean(tao_odds)
        # Mean of 1/confidence; zeros are clamped to 0.01, matching
        # calc_confidence_to_certainty (the original relied on that method
        # mutating the shared list to avoid a divide-by-zero here).
        avg_lc = float(np.mean([1.0 / (c if c != 0 else 0.01) for c in confidence_list]))
        # tau_odds expresses view error, so it sits in the numerator.
        return tau_odds_mean * 1.0 / avg_lc

    # Idzorek 2002
    def calc_confidence_to_certainty(self, view_confidence):
        """Convert per-view confidences into Omega, the diagonal covariance
        matrix of view errors.

        Each diagonal entry is CF / confidence_i, where the calibration
        factor CF scales with the average confidence and the variance of
        the aggregate view exposure under the market covariance.
        """
        # Clamp zero confidences (entries are proportional to
        # 1/confidence).  Work on a copy so the caller's list is not
        # mutated -- the original modified view_confidence in place.
        confidences = [c if c != 0 else 0.01 for c in view_confidence]

        # Average confidence level (the original paper uses 50%).
        avg_confidence = np.mean(confidences)

        inverse_confidences = [1.0 / c for c in confidences]

        # Aggregate exposure of all views per asset.
        weight_p_sum = np.array(self.view_weights).sum(axis=0)

        # Calibration factor for the diagonal of Omega.
        cf = avg_confidence * np.dot(np.dot(weight_p_sum, self.sigma_mkt_cov), weight_p_sum.T)
        return np.diag([lc * cf for lc in inverse_confidences])

    # Implied Equilibrium Return Vector (Pi).
    def calc_pi_reverse_reverse_optimize(self):
        """Reverse optimization: Pi = lambda_mkt * Sigma * w_mkt (n*1)."""
        return self.lambda_market * np.dot(self.sigma_mkt_cov, np.array(self.omega_market).T)

    def calc_bl_sigma(self):
        """Posterior covariance:

        Sigma_BL = [(tau*Sigma)^-1 + P.T * Omega^-1 * P]^-1
        """
        prior_precision = linalg.inv(self.tau * np.array(self.sigma_mkt_cov))
        view_precision = np.dot(
            np.dot(self.view_weights.T, linalg.inv(np.array(self.omega_view_uncertainty))),
            self.view_weights)
        return linalg.inv(prior_precision + view_precision)

    def calc_bl_expected_return(self):
        """Posterior expected return:

        E[R] = Pi + tau*Sigma*P.T * (Omega + tau*P*Sigma*P.T)^-1 * (Q - P*Pi)
        """
        adjustment_precision = linalg.inv(
            self.omega_view_uncertainty
            + self.tau * np.dot(np.dot(self.view_weights, self.sigma_mkt_cov), self.view_weights.T))

        # View "surprise": how far the views deviate from equilibrium.
        view_surprise = self.q_view_list - np.dot(self.view_weights, self.pi)

        return self.pi + self.tau * np.dot(
            np.dot(np.dot(self.sigma_mkt_cov, self.view_weights.T), adjustment_precision),
            view_surprise)

    def get_new_combined_return_dist(self):
        """Return the posterior (expected return, covariance) pair."""
        return self.calc_bl_expected_return(), self.calc_bl_sigma()

    # Objective: maximize utility; negated because scipy minimizes.
    def max_bl_return(self, w):
        """Negative investor utility: -(w.T*ER - lambda_user/2 * w.T*Sigma*w)."""
        w = np.asarray(w)
        utility = np.dot(w.T, self.bl_expected_return) \
            - self.lambda_user / 2.0 * np.dot(np.dot(w.T, self.bl_sigma), w)
        return -1 * utility

    def optimize(self):
        """Solve for optimal weights with SLSQP under a full-investment
        constraint plus any instrument-type constraints from the base class.

        :return: optimal weight vector (opts.x).
        """
        # Equal-weight starting point.
        w0 = np.ones(self.num_assets) * 1.0 / self.num_assets

        # Weights must sum to 1.
        cons = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}]
        bnds, instrument_type_cons = self._process_constraints()
        cons += instrument_type_cons

        opts = minimize(self.max_bl_return,
                        w0,
                        method='SLSQP',
                        bounds=bnds,
                        constraints=cons)
        return opts.x

    def optimize2(self):
        """Iterative variant: re-solves while applying the lower-bound
        control matrix (minBoundCtrlMatrix, from the base class), zeroing
        assets whose weights hit bottom.

        :return: (rounded weight list, solver success flag).
        """
        # Equal-weight starting point.
        w0 = np.ones(self.num_assets) * 1.0 / self.num_assets

        bnds = np.array([self.minbounds, self.maxbounds]).T
        _init = True
        CtrlVector = [1] * len(w0)
        while True:
            cons, bnds_min, j = self.constraint_list(CtrlVector)
            sol = minimize(self.max_bl_return, w0, method='SLSQP', constraints=cons, bounds=bnds, tol=1e-15)

            # Round the solution; treat tiny weights as exact zero.
            rs = [0.0 if x < 1e-6 else round(x, 4) for x in sol.x]

            # Control vector for the next pass.
            CtrlVector = list(self.minBoundCtrlMatrix[j])

            # All switches on -> nothing to adjust; also terminate after
            # the second pass regardless.
            if (_init and sum(CtrlVector) == len(CtrlVector)) or not _init:
                return rs, sol.success

            # Some switches are off: re-enable any asset that is either
            # well above its lower bound or exactly at a zero bound.
            else:
                _init = False
                for x in range(len(CtrlVector)):
                    if CtrlVector[x] == 0:
                        if abs(rs[x] - bnds_min[x]) > 1e-2 or rs[x] == bnds_min[x] == 0:
                            CtrlVector[x] = 1
                if len(CtrlVector) == sum(CtrlVector):
                    return rs, sol.success


class SortedViews(object):
    """Collects ordered/absolute investor views and converts them into
    Black-Litterman inputs (pick matrix P, view returns Q, confidences).
    """

    def __init__(self, symbols_list):
        """
        :param symbols_list: full ordered list of asset symbols; column
            order of the generated pick matrix follows this list.
        """
        self.sorted_view_list = []
        self.num_assets = len(symbols_list)
        self.symbols_list = symbols_list

    def AddView(self, symbol1, symbol2, premium, confidence=1):
        """Record a view: symbol1 outperforms symbol2 by `premium`.

        Either side may be a single symbol or a list of symbols; pass
        None as symbol2 for an absolute (no-benchmark) view.
        """
        if not isinstance(symbol1, list):
            symbol1 = [symbol1]
        if not isinstance(symbol2, list):
            symbol2 = [symbol2]
        self.sorted_view_list.append({"symbol": symbol1, "greaterThan": symbol2, "expectedPremium": premium, "confidence": confidence})

    # Satchell & Scowcroft method
    def GetResult_SS(self):
        """Build (P, Q, confidence) for all recorded views.

        Outperformers share +1 equally across the long side; benchmarks
        share -1 equally across the short side.  A None benchmark
        (absolute view) contributes no short leg.

        :return: (view_weight_P, view_Q, view_confidence) lists.
        """
        view_Q = []
        view_weight_P = []
        view_confidence = []
        for view in self.sorted_view_list:
            view_weight_row = [0 for _ in range(self.num_assets)]
            symbols = view["symbol"]
            benchmarks = view["greaterThan"]

            # Equal split: +1 over outperformers, -1 over benchmarks.
            symbol_weights = 1 / len(symbols)
            benchmark_weights = -1 / len(benchmarks)

            for symbol in symbols:
                view_weight_row[self.symbols_list.index(symbol)] = symbol_weights

            for benchmark in benchmarks:
                # `is None` (identity), not `== None`.
                if benchmark is None:
                    continue
                view_weight_row[self.symbols_list.index(benchmark)] = benchmark_weights

            view_Q.append(view["expectedPremium"])
            view_weight_P.append(view_weight_row)
            view_confidence.append(view["confidence"])

        return view_weight_P, view_Q, view_confidence


def Process_Sorted_Views(symbols_list, sorted_view_list):
    """Normalize raw view dicts and feed them through SortedViews.

    :param symbols_list: ordered asset symbol list (pick-matrix columns).
    :param sorted_view_list: dicts with keys "symbol", "greaterThan",
        "expectedPremium", "confidence".
    :return: (view_weight_P, view_Q, view_confidence) triple.
    """
    collector = SortedViews(symbols_list=symbols_list)

    for raw_view in sorted_view_list:
        long_side = raw_view.get("symbol")
        short_side = raw_view.get("greaterThan")
        premium = raw_view["expectedPremium"]
        confidence = raw_view["confidence"]

        # Substitute small defaults for missing/falsy values.
        if not premium:
            premium = 0.01
        if not confidence:
            confidence = 1
        # An empty-string benchmark means an absolute view.
        if short_side == "":
            short_side = None

        collector.AddView(long_side, short_side, premium, confidence)

    return collector.GetResult_SS()


def Test_View_List(database):
    """Demo: one absolute view per asset on a hand-crafted market profile;
    compares BL weights with Markowitz on both the posterior and raw inputs.
    """
    symbols = ["000300.SH", "H11001.CSI","AU.SHF", "SPX.GI"]
    print(symbols)

    # Hand-crafted profile (the database loader is intentionally bypassed).
    expected_returns = [0.02, 0.10, 0.20, 0.25]
    expected_volatility = [0.02, 0.10, 0.20, 0.3]
    correlation = Ave_Correlation_to_Matrix(0.8, len(expected_returns))
    covariance = Calc_Covariance_Matrix(correlation, expected_volatility)

    # Equal market weights; identity pick matrix = absolute views.
    market_weights = [0.25, 0.25, 0.25, 0.25]
    view_weight_P = [[1, 0, 0, 0, ], [0, 1, 0, 0, ], [0, 0, 1, 0, ], [0, 0, 0, 1]]
    view_Q = [0.02, 0.08, 0.15, 0.3]
    view_confidence = [1, 1, 1, 1]
    view_error_rate = list(np.ones(len(view_Q)) * 0.5)

    bl = BlackLitterman(covariance=covariance,
                        q_view_list=view_Q,
                        view_confidence=view_confidence,
                        view_weight_matrix=view_weight_P,
                        omega_market=market_weights,
                        lambda_market=1,
                        lambda_user=6.5,
                        tau_odds=view_error_rate)

    weights = bl.optimize()
    print("BL WEIGHT")
    print(weights)

    # Markowitz on the BL posterior distribution.
    mv = Markowitz(expected_return_array=bl.bl_expected_return, covariance_matrix=bl.bl_sigma)
    weights = mv.optimize(optimize_objective="MAXSHARPE")
    print("MV WEIGHT")
    print(weights)

    # Markowitz on the untouched inputs, for comparison.
    mv = Markowitz(expected_return_array=expected_returns, covariance_matrix=covariance)
    weights = mv.optimize(optimize_objective="MAXSHARPE")
    print("MV WEIGHT based Original MV")
    print(weights)
    pf_profile = Calc_Portfolio_Profile(weights, expected_returns, covariance, risk_free_rate=0.0)
    print(pf_profile)

    mv.plot_efficient_frontier()



# 随机序列，测试
def Test_With_Random_Data():
    """Run BL on randomly generated return data with equal market weights."""
    asset_count = 5
    obs_count = 100
    random_returns = Generate_Random_Data(asset_count, obs_count)
    rand_profile = Generate_Random_Data_Return_Volatility_Correlation(num_obs=100, num_assets=5)

    print(random_returns)
    cov_matrix = rand_profile["Covariance"]

    print("Return")
    print(rand_profile["Returns"])
    print("Volatility")
    print(rand_profile["Volatilities"])
    print("Correlation")
    print(rand_profile["Correlation"])

    equal_weights = [0.2, 0.2, 0.2, 0.2, 0.2]

    # System-style parameter bundle; only the BL-related fields are consumed.
    params = {
        'minbounds': [0, 0, 0, 0, 0],
        'maxbounds': [1, 1, 1, 1, 1],
        'w0': [0.2, 0.2, 0.2, 0.2, 0.2],
        # asset on/off switch matrix
        'lowerLimitSwitchMatrix': [[1, 1, 0, 0, 0], [1, 1, 1, 0, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],
        'upper_limits': [[1, 0.2], [0.6, 0.4], [0, 0.6], [0, 0.2], [0, 0.4]],
        'least_invest_amount': [100, 200, 200, 200, 200],
        'lambda_mkt': 1,
        "lambda_user": 6.5,
        'categoryLimitCoefficients': [[-5000, 2000], [-4000, 3000], [-2000, 5000], [3000, 10000], [5000, 12000]],
        'business_info': [{'indexCode': 'I_CN6112_CNI', 'categoryCode': '001', 'order': 1},
                          {'indexCode': 'I_H11001_CSI', 'categoryCode': '002', 'order': 2},
                          {'indexCode': 'I_000300_SH', 'categoryCode': '003', 'order': 3},
                          {'indexCode': 'I_AUM', 'categoryCode': '004', 'order': 4},
                          {'indexCode': 'I_SPX_GI', 'categoryCode': '005', 'order': 5}],
        'point_weightP': [[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
        'confidenceLC': [1.0, 0.6499263, 0.5139907, 0.5, 0.5479227],  # investor confidence, 0-1
        'pointQ': [5.56e-05, 0.0001288, 0.0003267, 0.0001519, 0.0003896],  # expected returns (daily or annual; must match sigma)
        'historicalOdds': [0.01, 0.5, 0.5, 0.5, 0.5],  # view error rates
        'sigma': cov_matrix  # covariance
    }

    bl_model = BlackLitterman(covariance=cov_matrix,
                              q_view_list=params["pointQ"],
                              view_confidence=params["confidenceLC"],
                              view_weight_matrix=params["point_weightP"],
                              omega_market=equal_weights,
                              lambda_market=params["lambda_mkt"],
                              lambda_user=6.5,
                              tau_odds=params["historicalOdds"])

    weights = bl_model.optimize()
    print(weights)


def Test_With_Real_Data(database):
    """Run BL with a single absolute view on daily returns loaded from the
    database."""
    start_dt = datetime.datetime(2017, 1, 1)
    end_dt = datetime.datetime(2020, 1, 1)
    symbols = ["AU.SHF", "H11001.CSI", "000300.SH", "SPX.GI", "000852.SH"]
    return_profile = Load_Daily_Returns_DataFrame_Calc_Return_Volatility_Correlation(database,
                                                                                     symbol_list=symbols,
                                                                                     datetime1=start_dt, datetime2=end_dt,
                                                                                     is_log=False, annualized_days=252)
    covariance_matrix = return_profile["Covariance"]

    print("Return")
    print(return_profile["Returns"])
    print("Volatility")
    print(return_profile["Volatilities"])
    print("Correlation")
    print(return_profile["Correlation"])

    market_weights = [0.2, 0.2, 0.2, 0.2, 0.2]

    # System-style parameter bundle; only lambda_mkt/historicalOdds are used.
    system_params_data = {
        'minbounds': [0, 0, 0, 0, 0],
        'maxbounds': [1, 1, 1, 1, 1],
        'w0': [0.2, 0.2, 0.2, 0.2, 0.2],
        # asset on/off switch matrix
        'lowerLimitSwitchMatrix': [[1, 1, 0, 0, 0], [1, 1, 1, 0, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],
        'upper_limits': [[1, 0.2], [0.6, 0.4], [0, 0.6], [0, 0.2], [0, 0.4]],
        'least_invest_amount': [100, 200, 200, 200, 200],
        'lambda_mkt': 1,
        "lambda_user": 6.5,
        'categoryLimitCoefficients': [[-5000, 2000], [-4000, 3000], [-2000, 5000], [3000, 10000], [5000, 12000]],
        'business_info': [{'indexCode': 'I_CN6112_CNI', 'categoryCode': '001', 'order': 1},
                          {'indexCode': 'I_H11001_CSI', 'categoryCode': '002', 'order': 2},
                          {'indexCode': 'I_000300_SH', 'categoryCode': '003', 'order': 3},
                          {'indexCode': 'I_AUM', 'categoryCode': '004', 'order': 4},
                          {'indexCode': 'I_SPX_GI', 'categoryCode': '005', 'order': 5}],
        'point_weightP': [[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
        'confidenceLC': [1.0, 0.6499263, 0.5139907, 0.5, 0.5479227],  # investor confidence, 0-1
        'pointQ': [0.1, 0.0001288, 0.0003267, 0.0001519, 0.0003896],  # expected returns (daily or annual; must match sigma)
        'historicalOdds': [0.01, 0.5, 0.5, 0.5, 0.5],  # view error rates
        'sigma': covariance_matrix  # covariance
    }

    # A single absolute view on the first asset.
    view_weight_P = [[1, 0, 0, 0, 0]]
    view_Q = [0.15]
    view_confidence = [0.5]  # investor confidence, 0-1

    bl = BlackLitterman(covariance=covariance_matrix,
                        q_view_list=view_Q,
                        view_confidence=view_confidence,
                        view_weight_matrix=view_weight_P,
                        omega_market=market_weights,
                        lambda_market=system_params_data["lambda_mkt"],
                        lambda_user=6.5,
                        tau_odds=system_params_data["historicalOdds"])

    weights = bl.optimize()
    print(weights)


def Test_Sorted_Views(database):
    """Build P/Q/confidence from ordered (relative) views and run BL on a
    random market profile."""
    rand_profile = Generate_Random_Data_Return_Volatility_Correlation(num_obs=100, num_assets=5)
    cov_matrix = rand_profile["Covariance"]
    market_weights = [0.2, 0.2, 0.2, 0.2, 0.2]

    views = SortedViews(symbols_list=["A","B","C","D","E"])
    views.AddView("A", None, 0.05, 1)               # absolute view on A
    views.AddView("C", "E", 0.02, 1)                # C outperforms E
    views.AddView(["A","B"], ["C","D"], 0.025, 1)   # basket vs basket

    view_weight_P, view_Q, view_confidence = views.GetResult_SS()
    view_error_rate = list(np.ones(len(view_Q)) * 0.5)

    bl = BlackLitterman(covariance=cov_matrix,
                        q_view_list=view_Q,
                        view_confidence=view_confidence,
                        view_weight_matrix=view_weight_P,
                        omega_market=market_weights,
                        lambda_market=1,
                        lambda_user=6.5,
                        tau_odds=view_error_rate)

    weights = bl.optimize()
    print(weights)


def Test_Confidence(database):
    """Check how differing view confidences shape the BL solution."""
    expected_returns = [0.02, 0.08, 0.15, 0.3]
    expected_volatility = [0.4, 0.2, 0.2, 0.2]
    correlation = Ave_Correlation_to_Matrix(0.8, len(expected_returns))
    covariance = Calc_Covariance_Matrix(correlation, expected_volatility)

    # Equal market weights; identity pick matrix, all views at 10%.
    market_weights = [0.25, 0.25, 0.25, 0.25]
    view_weight_P = [[1, 0, 0, 0, ], [0, 1, 0, 0, ], [0, 0, 1, 0, ], [0, 0, 0, 1]]
    view_Q = [0.10, 0.10, 0.10, 0.10]
    view_confidence = [1, 0.5, 0.5, 0.5]  # first view fully trusted
    view_error_rate = list(np.ones(len(view_Q)) * 1)

    bl = BlackLitterman(covariance=covariance,
                        q_view_list=view_Q,
                        view_confidence=view_confidence,
                        view_weight_matrix=view_weight_P,
                        omega_market=market_weights,
                        lambda_market=1,
                        lambda_user=6.5,
                        tau_odds=view_error_rate)

    weights = bl.optimize()
    print(weights)

    # Markowitz on the posterior, for comparison.
    mv = Markowitz(expected_return_array=bl.bl_expected_return, covariance_matrix=bl.bl_sigma)
    weights = mv.optimize(optimize_objective="MAXSHARPE")
    print(weights)


def Run_BL1():
    """Sketch of a BL run through an alternate `BL1` module.

    NOTE(review): not runnable as written -- `BL1`, `common`,
    `covariance_matrix`, `view_Q`, `view_confidence`, `view_weight_P`,
    `market_weights`, `lambda_market`, `lambda_user`, `tau_odds`,
    `risk_free_rate`, `instrument_list` and `symbol_constraints` are all
    undefined in this scope; presumably copied from another script and
    kept as a template. Confirm intent before deleting or fixing.
    """

    # optimize_objective = "MAXUSERUTILITY"
    # instrument_list = [{'symbol': 'CN6112.SZ', 'instrumentType': 'CURRENCY'}, {'symbol': 'H11001.CSI', 'instrumentType': 'BOND'}, {'symbol': '000300.SH', 'instrumentType': 'STOCK'}, {'symbol': 'NHAUI.SL', 'instrumentType': 'COMMODITY'}, {'symbol': 'SPX.GI', 'instrumentType': 'OVERSEA'}]
    # symbol_constraints = [{'symbol': 'CN6112.SZ', 'minWeight': 0.024462871026284194, 'maxWeight': 0.3424969012819506}, {'symbol': 'H11001.CSI', 'minWeight': 0.04892574205256839, 'maxWeight': 0.43562422532048767}, {'symbol': '000300.SH', 'minWeight': 0.08118477668724633, 'maxWeight': 0.510939436698781}, {'symbol': 'NHAUI.SL', 'minWeight': 0.08118477668724633, 'maxWeight': 0.16437577467951237}, {'symbol': 'SPX.GI', 'minWeight': 0.0, 'maxWeight': 0.0}]
    #
    # instrument_type_constraints = None
    # optimize_target_volatility = None
    # optimize_target_return = None

    bl = BL1.BlackLitterman(covariance_matrix=covariance_matrix,
                            view_list_Q=view_Q,
                            view_confidence=view_confidence,
                            view_weight_matrix_P=view_weight_P,
                            omega_market=market_weights,
                            lambda_market=lambda_market,
                            lambda_user=lambda_user,
                            tau_odds=tau_odds,
                            risk_free_rate=risk_free_rate)

    common.Process_Contrains(instrument_list, symbol_constraints, None, bl)
    optimal_weights = bl.optimize()
    print(optimal_weights)


if __name__ == '__main__':

    # NOTE(review): `os` is not imported in this file's visible import
    # block; it presumably arrives via `from AssetAllocation.Common import *`
    # -- confirm.
    # os.path.join replaces the original literal "\..\Config\config_local.json":
    # "\." and "\C" are invalid escape sequences (SyntaxWarning on modern
    # Python) and the hard-coded backslashes are Windows-only.
    path_filename = os.path.join(os.getcwd(), "..", "Config", "config_local.json")
    database = Config.create_database(database_type="MySQL", config_file=path_filename, config_field="MySQL")

    # Test_With_Real_Data(database)
    # Test_With_Random_Data()
    Test_View_List(database)
    # Test_Sorted_Views(database)
    # Test_Confidence(database)
    # Run_BL1()