# #!/usr/bin/env python
# # -*- encoding: utf-8 -*-
# '''
# @File    :   level_rate_config.py
# @Contact :   pengwei.sun@aihuishou.com
# @License :   (C)Copyright aihuishou
#
# # @Modify Time      @Author       @Version    @Description
# ------------      -----------   --------    -----------
# 2021-01-10 16:48   pengwei.sun      1.0         None
# '''
#
# import datetime
# import pandas as pd
# from sklearn.preprocessing import OneHotEncoder
# import numpy as np
# from src.utils.config import logger
# from src.utils.db_processor import postgre_processor,mysql_prediction_processor
# from src.utils.util import get_today, check_date_str, format_date_string
# from src.mobile.levelrate.config_utils import level_rank_df,product_level_df
#
# """
# 取数逻辑 ：
# 按天汇总skuleve的价格：取平均数
#  --b:筛选出具有S,A,B等级，并且等级数存在数大于1的sku
# """
# SETTLE_DATA = """
# select a.secondary_level_template_id as property_level_template_id,CONCAT(a.secondary_level_template_id,'_',a.product_brand_id) as property_template_brand,a.mapping_product_sku_id as product_sku_id,CONCAT(a.secondary_level_template_id,'_',a.product_brand_id,'_',a.mapping_product_sku_id) as product_sku,
# a.mapping_product_level_id as product_level_id,a.mapping_product_level_name as product_level_name,substring(a.mapping_product_level_name,1,1) as level_sub,
# avg(a.real_sell_price) as price,
# COUNT(*) AS sale_num,
# count(distinct a.shop_out_date) as cnt_date
#  from product_price_info  a
# inner join (
# 	select secondary_level_template_id,product_id,mapping_product_sku_id,
# 	count(distinct mapping_product_level_id) AS cnt,
# 	count(distinct product_level_id_2) AS cnt1
# 	from
# 	(
# 	select distinct secondary_level_template_id,product_id,mapping_product_sku_id ,mapping_product_level_name ,
# 	mapping_product_level_id,
# 	case when substring(mapping_product_level_name,1,1) in ('S','A','B')
# 	THEN mapping_product_level_id ELSE null end product_level_id_2
# 	 from product_price_info
# 	where product_brand_id in (52,9,4,7,16,24,484,184,357,661)  and shop_out_date between DATE_FORMAT(date_sub(curdate(),interval 7 day),'%Y%m%d')  and  DATE_FORMAT(date_sub(curdate(),interval 0 day),'%Y%m%d') and product_category_id=1
# 	) a
# 	GROUP BY 1,2
# 	having  count(distinct product_level_id_2)>0 and count(distinct mapping_product_level_id)>=2
# )b
# on  a.product_id=b.product_id and a.mapping_product_sku_id=b.mapping_product_sku_id
#
# where a.product_brand_id in (52,9,4,7,16,24,484,184,357,661)  and a.shop_out_date between DATE_FORMAT(date_sub(curdate(),interval 7 day),'%Y%m%d')  and  DATE_FORMAT(date_sub(curdate(),interval 0 day),'%Y%m%d') and a.product_category_id=1  and mapping_product_level_id>0
#
# GROUP BY 1,2,3,4,5,6
# order by 1,2,3,4
# """
#
# SETTLE_DATA_2 = """
# select a.secondary_level_template_id as property_level_template_id,CONCAT(a.secondary_level_template_id,'_',a.product_brand_id) as property_template_brand,a.mapping_product_sku_id as product_sku_id,CONCAT(a.secondary_level_template_id,'_',a.product_brand_id,'_',a.mapping_product_sku_id) as product_sku,
# a.mapping_product_level_id as product_level_id,a.mapping_product_level_name as product_level_name,substring(a.mapping_product_level_name,1,1) as level_sub,
# avg(a.real_sell_price) as price,
# COUNT(*) AS sale_num,
# count(distinct a.shop_out_date) as cnt_date
#  from product_price_info  a
# inner join (
# 	select secondary_level_template_id,product_id,mapping_product_sku_id,
# 	count(distinct mapping_product_level_id) AS cnt,
# 	count(distinct product_level_id_2) AS cnt1
# 	from
# 	(
# 	select distinct secondary_level_template_id,product_id,mapping_product_sku_id ,mapping_product_level_name ,
# 	mapping_product_level_id,
# 	case when substring(mapping_product_level_name,1,1) in ('S','A','B')
# 	THEN mapping_product_level_id ELSE null end product_level_id_2
# 	 from product_price_info
# 	where product_brand_id in (52,9,4,7,16,24,484,184,357,661)
# 	and shop_out_date between DATE_FORMAT(date_sub(curdate(),interval 14 day),'%Y%m%d')  and DATE_FORMAT(date_sub(curdate(),interval 8 day),'%Y%m%d')
# 	and product_category_id=1  and mapping_product_level_id>0
# 	) a
# 	GROUP BY 1,2
# 	having  count(distinct product_level_id_2)>0 and count(distinct mapping_product_level_id)>=2
# )b
# on  a.product_id=b.product_id and a.mapping_product_sku_id=b.mapping_product_sku_id
#
# where a.product_brand_id in (52,9,4,7,16,24,484,184,357,661)  and a.shop_out_date between DATE_FORMAT(date_sub(curdate(),interval 14 day),'%Y%m%d')  and DATE_FORMAT(date_sub(curdate(),interval 8 day),'%Y%m%d')
#  and a.product_category_id=1
#
# GROUP BY 1,2,3,4,5,6
# order by 1,2,3,4
# """
#
# SETTLE_DATA_3 = """
# select a.secondary_level_template_id as property_level_template_id,CONCAT(a.secondary_level_template_id,'_',a.product_brand_id) as property_template_brand,a.mapping_product_sku_id as product_sku_id,CONCAT(a.secondary_level_template_id,'_',a.product_brand_id,'_',a.mapping_product_sku_id) as product_sku,
# a.mapping_product_level_id as product_level_id,a.mapping_product_level_name as product_level_name,substring(a.mapping_product_level_name,1,1) as level_sub,
# avg(a.real_sell_price) as price,
# COUNT(*) AS sale_num,
# count(distinct a.shop_out_date) as cnt_date
#  from product_price_info  a
# inner join (
# 	select secondary_level_template_id,product_id,mapping_product_sku_id,
# 	count(distinct mapping_product_level_id) AS cnt,
# 	count(distinct product_level_id_2) AS cnt1
# 	from
# 	(
# 	select distinct secondary_level_template_id,product_id,mapping_product_sku_id ,mapping_product_level_name ,
# 	mapping_product_level_id,
# 	case when substring(mapping_product_level_name,1,1) in ('S','A','B')
# 	THEN mapping_product_level_id ELSE null end product_level_id_2
# 	 from product_price_info
# 	where product_brand_id in (52,9,4,7,16,24,484,184,357,661)  and shop_out_date between DATE_FORMAT(date_sub(curdate(),interval 21 day),'%Y%m%d')  and DATE_FORMAT(date_sub(curdate(),interval 15 day),'%Y%m%d')
# 	  and product_category_id=1  and mapping_product_level_id>0
# 	) a
# 	GROUP BY 1,2
# 	having  count(distinct product_level_id_2)>0 and count(distinct mapping_product_level_id)>=2
# )b
# on  a.product_id=b.product_id and a.mapping_product_sku_id=b.mapping_product_sku_id
#
# where a.product_brand_id in (52,9,4,7,16,24,484,184,357,661)
# and a.shop_out_date between DATE_FORMAT(date_sub(curdate(),interval 21 day),'%Y%m%d')  and DATE_FORMAT(date_sub(curdate(),interval 15 day),'%Y%m%d')
#  and a.product_category_id=1
#
# GROUP BY 1,2,3,4,5,6
# order by 1,2,3,4
# """
#
# #--and a.product_id=32291 S,A,B基准价格
# BASE_PRICE_DATA = """
# SELECT level_name,price as base_price FROM c_level_base_price
# """
#
#
# class TemplateLevelRate:
#     def __init__(self,sql):
#
#         # sql = SETTLE_DATA
#         #获取型号对应的skulevel聚合后的数据
#         if sql is None:
#             self.query_sql=SETTLE_DATA
#         else:
#             self.query_sql=sql
#
#         self.data = mysql_prediction_processor.load_sql(self.query_sql)
#         self.data = self.data.loc[self.data.product_level_id > 0]
#         #基础等级价格
#         self.base_price_df = mysql_prediction_processor.load_sql(BASE_PRICE_DATA)
#         self.level_rank_df =level_rank_df
#         self.product_level_df =product_level_df
#
#     def fun1(self):
#         #根据售卖数量和等级计算每个等级得分，以便后续sku 筛选:高等级的等级id较小，所以采取1000-等级id，使得，相同数量的情况下，高等级的优先选择
#         self.data['score']=1000-self.data.product_level_id+self.data.sale_num*1000
#         #首先筛选出sku中 有S,A,B等级数据
#         data_sku_base_level_df=self.data.loc[self.data.level_sub.isin(['S','A','B'])]
#
#         # a= data_sku_base_level_df.groupby(by=['product_id','product_sku_id'], as_index=False)['score'].max()
#         # index= data_sku_base_level_df.groupby(by=['product_id','product_sku_id'])['score'].idxmax()
#
#         #找出数据中score得分最高的level
#         base_level=data_sku_base_level_df.loc[data_sku_base_level_df.sort_values(['product_sku','score']).drop_duplicates('product_sku',keep='last').index]
#
#         base_level.rename(columns={'price':'price_base','score':'score_base','sale_num':'base_sale_num'},inplace=True)
#
#         # base_level
#         #统计出满足条件的型号下 sku的数量
#         base_product_sku_size=data_sku_base_level_df[['property_template_brand','product_sku_id']].groupby('property_template_brand').agg({'product_sku_id': pd.Series.nunique}).reset_index()
#         base_product_sku_size.rename(columns={'product_sku_id':'product_sku_num'},inplace=True)
#
#
#         #给筛选出的基础数据拼接上其对应的基础价格
#         base_level=base_level.merge(self.base_price_df,left_on=['product_level_name'],right_on=['level_name'])
#         base_base_level_sale_num=base_level.groupby(by=['property_template_brand'])['base_sale_num'].agg({'base_level_sale_sum':'sum'})
#         base_level=base_level.merge(base_base_level_sale_num,on='property_template_brand')
#         base_level['sku_weight']=base_level.base_sale_num/base_level.base_level_sale_sum
#
#         # 给数据拼接上他们各自的基础信息
#         df_all = self.data.merge(base_level[['product_sku', 'price_base', 'score_base', 'base_price']], on=['product_sku'])
#
#         base_product_sale_num=df_all.groupby('property_template_brand')['sale_num'].agg({'product_sale_sum':'sum' }).reset_index()
#         base_product_info=base_product_sku_size.merge(base_product_sale_num,left_on=['property_template_brand'],right_on=['property_template_brand'])
#         return base_level,base_product_info
#
#     def fun2(self,base_level,base_product_info):
#         # 给数据拼接上他们各自的基础信息
#         df_all = self.data.merge(base_level[['product_sku', 'price_base', 'score_base', 'base_price','sku_weight','base_level_sale_sum','base_sale_num']],
#                                  on=['product_sku'])
#
#         #计算等级比率，并还原成基础价格
#         df_all['level_rate']=df_all.price/df_all.price_base
#         df_all['to_base_price']=df_all.base_price*df_all.level_rate
#         df_all = df_all.loc[df_all.product_level_id > 0]
#         df_all['product_level_id'] = df_all['product_level_id'].astype(int)
#         #型号维度的汇总
#         df_all['product_level_id_name']=df_all['property_template_brand'].astype(str)+'_'+df_all['product_level_id'].astype(str)+'_'+df_all['product_level_name']
#
#         #求出汇总后的数据的，平均价格，平均售卖数量等信息
#         result1=df_all.groupby(by='product_level_id_name')['to_base_price'].agg({'price_mean':'mean','price_max':'max','price_min':'min','price_media':'median'}).reset_index()#这里
#         #型号下 各个等级的 售卖数量相关信息
#         cnt_res=df_all.groupby(by='product_level_id_name')['sale_num'].agg({'sale_sum':'sum','sku_cnt':'count'}).reset_index()#这里
#
#         df_all=df_all.merge(cnt_res,on='product_level_id_name')
#
#         #等级价格汇总时 ，是以此等级售卖出的数量占此等级的比例为权重，进行加权计算
#         df_all['level_weight']=df_all['sale_num']/df_all['sale_sum']
#         df_all['sku_base_level_weight']=df_all['base_sale_num']/df_all['base_level_sale_sum']
#         df_all['zh_level_weight']=df_all['sku_base_level_weight']*df_all['level_weight']
#         # df_all['zh_level_weight'](by='product_level_id_name').agg({'price_mean':'sum'})
#
#         weight_res = df_all.groupby(by='product_level_id_name')['zh_level_weight'].agg(
#             {'weight_sum': 'sum'}).reset_index()  # 这里
#
#
#         df_all=df_all.merge(weight_res[['product_level_id_name','weight_sum']],on=['product_level_id_name'])
#         df_all['weight']=df_all['zh_level_weight']/df_all.weight_sum
#
#         df_all['to_base_price_weight']=df_all['weight']*df_all['to_base_price']
#         result=df_all.groupby(by='product_level_id_name')['to_base_price_weight'].agg({'price_mean':'sum','price_max':'sum','price_min':'sum','price_media':'sum'}).reset_index()#这里
#
#         resulttmp=result.merge(result1,on='product_level_id_name')
#
#         # result=result.sort_values('price_media',ascending=False).reset_index()
#         result['property_template_id']=result['product_level_id_name'].str.split('_',expand=True)[0].astype(np.int64)
#         result['level_id']=result['product_level_id_name'].str.split('_',expand=True)[2].astype(np.int64)
#         result['brand_id']=result['product_level_id_name'].str.split('_',expand=True)[1].astype(np.int64)
#         result['property_template_brand'] = result['property_template_id'].astype(str) + '_' + result[
#             'brand_id'].astype(str)
#
#         result=result.merge(base_product_info,how='inner',left_on=['property_template_brand'],right_on=['property_template_brand'])
#         result=result.merge(self.level_rank_df,left_on=['level_id'],right_on=['product_level_id'])
#
#         result_t=result.merge(cnt_res,on='product_level_id_name')
#
#         #求出汇总后的等级比率和sku占比
#         result_t['rate']=result_t.price_media/10000
#
#         result_t['sku_rate']=result_t.sku_cnt/result_t.product_sku_num
#
#
#
#         result_t=result_t.sort_values('product_level_order_rank',ascending=True)
#         result_t['rate_f']=result_t['rate']
#         result_t=result_t.reset_index()
#         # result_t_tmp=result_t
#         # size=result_t.shape[0]
#         resDf = pd.DataFrame(columns=result_t.columns.tolist())
#         grouped = result_t.groupby('property_template_brand')
#         del result_t
#         for name, group in grouped:
#             group.reset_index(drop=True, inplace=True)
#             group=group.sort_values('product_level_order_rank', ascending=True)
#             size = group.shape[0]
#             result_t = group
#             logger.info('property_template_brand:{}'.format(group.loc[0,'property_template_brand']))
#             # if group.loc[0,'property_template_brand']=='36_24':
#             #     print('dsds')
#
#             for index in range(size):
#
#                 if index==0 :
#                     if result_t.loc[index, 'sku_rate']>0.3:
#                         continue
#                     for next in range(1,size,1):
#                         if next>=size:
#                             continue
#                         if result_t.loc[next, 'sku_rate']>0.3:
#                             break
#
#                     if (next<size) and ((result_t.loc[index, 'rate_f']-result_t.loc[next, 'rate_f'])/(next-index)>0.10):
#                         # for i in range(next-1,index,-1):
#                         result_t.loc[index, 'rate_f']=result_t.loc[next, 'rate_f']+0.1*(next-index)
#
#
#
#                     # if result_t.loc[index,'rate_f']>1.2:
#                         # result_t.loc[index, 'rate_f']=1.2
#                     continue
#
#                 # if index==size-1:
#                 #     break
#                 if (result_t.loc[index,'product_level_id_name'].find('S')>0 \
#                     or result_t.loc[index,'product_level_id_name'].find('A')>0 \
#                     or result_t.loc[index, 'product_level_id_name'].find('B') > 0):
#
#                     if result_t.loc[index, 'rate_f']-result_t.loc[index-1,'rate_f']>=0 and result_t.loc[index, 'sku_rate']<0.3:
#                         result_t.loc[index, 'rate_f'] = result_t.loc[index-1, 'rate_f']-0.01
#                     if result_t.loc[index, 'rate_f']-result_t.loc[index-1,'rate_f']>=0 and result_t.loc[index, 'sku_rate']>=0.3 :
#                         rate_flag=result_t.loc[index - 1, 'rate_f']
#                         flag_index=index - 1
#                         x = index - 1
#                         for x in range(index-1, 0, -1):
#                             if result_t.loc[x, 'sku_rate']>=0.3:
#                                 rate_flag=result_t.loc[x, 'rate_f']
#                                 flag_index=x
#                                 break
#                         if index-x>1 and rate_flag-result_t.loc[index, 'rate_f']>0:
#                             rate_bulk=(rate_flag - result_t.loc[index, 'rate_f'])/(index-x)
#                             for bulk in range(flag_index+1,index-1,1):
#                                 result_t.loc[bulk, 'rate_f'] = result_t.loc[bulk + 1, 'rate_f'] - rate_bulk
#                     continue
#                 if result_t.loc[index, 'rate_f']-result_t.loc[index-1,'rate_f']>0 \
#                     and result_t.loc[index, 'sku_rate']<0.3:
#                     result_t.loc[index, 'rate_f'] = result_t.loc[index - 1, 'rate_f'] - 0.02
#             resDf = resDf.append(result_t.copy())
#         # resDf['property_template_brand']=resDf.property_template_brand.astype(np.int64)
#         # def fun3(process_df, rate_f='mean_rate', w='w_mean'):
#         #     process_df = process_df.reset_index()
#         #     process_df[rate_f] = process_df[rate_f].fillna(-1)
#         #     resDf = pd.DataFrame(columns=process_df.columns.tolist())
#         #
#         #     grouped = process_df.groupby('template_brand')
#         #     for name, group in grouped:
#         #         size = group.shape[0]
#         #         group.reset_index(drop=True, inplace=True)
#         #         for i in range(size):
#         #             if group.loc[i, rate_f] == -1:
#         #                 if i == 0:
#         #                     for next in range(i + 1, size, 1):
#         #                         if group.loc[next, rate_f] > 0:
#         #                             break
#         #                     group.loc[i, rate_f] = group.loc[next, rate_f] + next * 0.03
#         #                     continue
#         #                 if i > 0:
#         #                     next = size
#         #                     for next in range(i + 1, size, 1):
#         #                         if group.loc[next, rate_f] > 0 and group.loc[next, w] >= 0.5:
#         #                             break
#         #                     if next < size - 1:
#         #                         group.loc[i, rate_f] = group.loc[i - 1, rate_f] - (
#         #                                     group.loc[i - 1, rate_f] - group.loc[next, rate_f]) / (next - i + 1)
#         #
#         #                     else:
#         #                         varience = (group.loc[i - 1, rate_f] - 0.1) / (next - i + 1)
#         #                         group.loc[i, rate_f] = group.loc[i - 1, rate_f] - varience
#         #                     continue
#         #             else:
#         #                 continue
#         #         resDf = resDf.append(group.copy())
#         #     print(1)
#         #     return resDf
#
#         resDf['level_id']=resDf.level_id.astype(np.int64)
#         product_level_df=self.product_level_df.merge(resDf,how='left',left_on=['template_brand','level_id'],right_on=['property_template_brand','level_id'])
#
#         product_level_df=product_level_df.merge(self.level_rank_df,left_on=['level_id'],right_on=['product_level_id'])
#         product_level_df=product_level_df.sort_values(['level_template_id','product_level_order_rank_y'],ascending=True)
#
#         print(1)
#         return product_level_df
#
# def fun3(process_df,rate_f='mean_rate',w='w_mean'):
#     process_df=process_df.reset_index()
#     process_df[rate_f]=process_df[rate_f].fillna(-1)
#     resDf = pd.DataFrame(columns=process_df.columns.tolist())
#
#     grouped = process_df.groupby('template_brand')
#     for name, group in grouped:
#         size=group.shape[0]
#         group.reset_index(drop=True, inplace=True)
#         for i in range(size):
#             if group.loc[i,rate_f]==-1:
#                 if i==0:
#                     for next in range(i+1,size,1):
#                         if group.loc[next, rate_f]>0:
#                             break
#                     group.loc[i, rate_f]=group.loc[next, rate_f]+next*0.03
#                     continue
#                 if i>0:
#                     next=size
#                     for next in range(i+1,size,1):
#                         if group.loc[next, rate_f]>0 and group.loc[next, w]>=0.5:
#                             break
#                     if next<size-1:
#                         group.loc[i, rate_f]=group.loc[i-1, rate_f]-(group.loc[i-1, rate_f]-group.loc[next, rate_f])/(next-i+1)
#
#                     else:
#                         varience=(group.loc[i - 1, rate_f]-0.1)/(next-i+1)
#                         group.loc[i, rate_f] = group.loc[i - 1, rate_f]-varience
#                     continue
#             else:
#                 continue
#         resDf = resDf.append(group.copy())
#     print(1)
#     return resDf
#
#
#
#
#
# def process_product_level_rate(sql):
#
#     model=TemplateLevelRate(sql)
#     base_level,base_product_info=model.fun1()
#     product_level_df=model.fun2(base_level,base_product_info)
#     # product_level_df=model.fun3(product_level_df)
#     return product_level_df
#
# def process_template_level_rate_final():
#     product_level_df1=process_product_level_rate(SETTLE_DATA)
#     product_level_df2=process_product_level_rate(SETTLE_DATA_2)
#     product_level_df3=process_product_level_rate(SETTLE_DATA_3)
#
#     tmp1=product_level_df1[['template_brand','level_id','sku_rate','rate_f']]
#     tmp1.rename(columns={'rate_f':'rate_f1','sku_rate':'sku_rate1'},inplace=True)
#     tmp2=product_level_df2[['template_brand','level_id','sku_rate','rate_f']]
#     tmp2.rename(columns={'rate_f':'rate_f2','sku_rate':'sku_rate2'},inplace=True)
#
#
#     tmp3=product_level_df3[['template_brand','level_id','sku_rate','rate_f']]
#     tmp3.rename(columns={'rate_f':'rate_f3','sku_rate':'sku_rate3'},inplace=True)
#
#     tmp1=tmp1.merge(tmp2,left_on=['template_brand','level_id'],right_on=['template_brand','level_id'])
#     tmp1=tmp1.merge(tmp3,on=['template_brand','level_id'])
#
#
#     tmp1.loc[tmp1.rate_f1>0 ,'w_f1']=0.5
#     tmp1.loc[tmp1.rate_f2>0 ,'w_f2']=0.3
#     tmp1.loc[tmp1.rate_f3>0 ,'w_f3']=0.2
#
#     tmp1['rate_f1_w']=tmp1.w_f1*tmp1.rate_f1
#     tmp1['rate_f2_w']=tmp1.w_f2*tmp1.rate_f2
#     tmp1['rate_f3_w']=tmp1.w_f3*tmp1.rate_f3
#     tmp1['mean_rate'] = tmp1[['rate_f1_w', 'rate_f2_w', 'rate_f3_w']].sum(axis=1)/tmp1[['w_f1', 'w_f2', 'w_f3']].sum(axis=1)
#     tmp1['w_mean'] = tmp1[['w_f1', 'w_f2', 'w_f3']].sum(axis=1)
#     tmp1['mean_rate_tmp']=tmp1['mean_rate']
#
#     result_df=fun3(tmp1)
#     result_df.to_csv('/data/sunpengwei/tmp/template_level_rate.csv', encoding='utf-8-sig')
#     return result_df
#
# def predict_price_data(result_df):
#     # predict_price_sql
#     price_df=mysql_prediction_processor.load_sql(predict_price_sql)
#     # price_df['property_template_brand'] = price_df.property_template_brand.astype(np.int64)
#     price_df['product_level_key'] = price_df.product_level_key.astype(np.int64)
#
#     # result_df['property_template_brand'] = result_df.property_template_brand.astype(np.int64)
#     result_df['level_id'] = result_df.level_id.astype(np.int64)
#
#     #获取到等级比率
#     price_df=price_df.merge(result_df[['template_brand','level_id','mean_rate']],left_on=['property_template_brand','product_level_key'],right_on=['template_brand','level_id'])
#
#     #计算score得分 找出等级高 并且销售数量多的等级价格作为预测价格的基准
#     price_df['score']=1000-price_df.product_level_key+price_df.qty*1000
#     data_sku_base_level_df = price_df.loc[price_df.level_sub.isin([ 'A', 'B'])]
#
#     # a= data_sku_base_level_df.groupby(by=['product_id','product_sku_id'], as_index=False)['score'].max()
#     # index= data_sku_base_level_df.groupby(by=['product_id','product_sku_id'])['score'].idxmax()
#
#     # 找出数据中score得分最高的level
#     base_level = data_sku_base_level_df.loc[
#         data_sku_base_level_df.sort_values(['product_sku', 'score']).drop_duplicates('product_sku', keep='last').index]
#     base_level['base_price']=base_level['forecast_reference_price']/base_level['mean_rate']
#
#     price_df=price_df.merge(base_level[['product_sku','base_price']],on='product_sku')
#     price_df['level_rate_price']=price_df.base_price*price_df.mean_rate #根据等级比率换算的价格 作为后续判断等级之间价差的指标
#     # price_df['level_rate_price']=price_df.base_price*price_df.mean_rate
#     price_df['avg_predict_basep_price'] = price_df[['saleprice', 'forecast_reference_price']].mean(axis=1) #计算预测价格与昨日上拍价之间的均值
#
#     price_df['predict_level_price_rate']=abs(price_df['forecast_reference_price']-price_df['level_rate_price'])/price_df.level_rate_price #预测价与比率换算价的价差
#     price_df['saleprice_level_price_rate']=abs(price_df['saleprice']-price_df['level_rate_price'])/price_df.level_rate_price
#     price_df['mean_price_rate']=abs(price_df['avg_predict_basep_price']-price_df['level_rate_price'])/price_df.level_rate_price
#     price_df['qty']=price_df['qty'].astype(np.int64)
#
#     price_df['forecast_reference_pricet'] = price_df['forecast_reference_price'] #对比排查问题方便
#     price_df['salepricet'] = price_df['saleprice'] #对比排查问题方便
#     price_df['qtyt'] = price_df['qty'] #对比排查问题方便
#
#     price_df['process_price'] = price_df['forecast_reference_price']
#     price_df['process_price_f'] = price_df['forecast_reference_price'] #对比排查问题方便
#     price_df['flag'] = 0
#     price_df['diff_rate']=price_df['saleprice_level_price_rate']
#     resDf = pd.DataFrame(columns=price_df.columns.tolist())
#
#
#     grouped = price_df.groupby(['property_template_brand','product_sku_key'])
#     for name, group in grouped:
#         size=group.shape[0]
#
#         group=group.sort_values(['mean_rate'],ascending=False)
#         # group['levelname']=group.product_level_name
#         group.reset_index(drop=True, inplace=True)
#         for i in range(size):
#             predict_rate=group.loc[i,'predict_level_price_rate']
#             saleprice_rate=group.loc[i,'saleprice_level_price_rate']
#             mean_price_rate=group.loc[i,'mean_price_rate']
#             level_sub=group.loc[i,'level_sub']
#             n_p=1
#             if group.loc[i,'forecast_reference_price']-group.loc[i,'level_rate_price']<0:
#                 n_p=-1
#
#             qty=group.loc[i,'qty']
#             if level_sub in ['S','A','B'] and predict_rate>0.1:
#                 if i==0: #第一个等级大于异常范围时，如果人工价格大于预测价的下一个等级，则取人工价，否则取下个等级的预测价 *1.05
#                     if group.loc[i, 'saleprice']>group.loc[i+1, 'process_price'] :
#                         group.loc[i, 'process_price'] = group.loc[i, 'saleprice']
#                     else:
#                         group.loc[i, 'process_price']=group.loc[i+1, 'process_price']*1.05
#                 elif predict_rate>saleprice_rate:
#                     if saleprice_rate<0.1:
#                         if saleprice_rate>mean_price_rate:
#                             group.loc[i,'process_price']=group.loc[i,'avg_predict_basep_price']
#                         else:
#
#                             group.loc[i, 'process_price'] = group.loc[i, 'saleprice']
#                     else:
#                         if qty>0:
#                             group.loc[i, 'process_price'] = group.loc[i, 'saleprice']*0.95
#                         else:
#                             group.loc[i, 'process_price']=group.loc[i, 'level_rate_price']*(1+0.1*n_p)
#                 else:
#                     if qty > 0:
#                         group.loc[i, 'process_price'] = group.loc[i, 'saleprice'] * 0.95
#                     else:
#                         group.loc[i, 'process_price'] = group.loc[i, 'level_rate_price'] * (1+0.1*n_p)
#                 group.loc[i,'flag']=1
#
#             elif level_sub  in ['C','D','E'] and predict_rate>0.15:
#                 if i==0:
#                     group.loc[i, 'process_price'] = group.loc[i, 'saleprice']
#                 elif predict_rate>saleprice_rate:
#                     if saleprice_rate<0.1:
#                         if saleprice_rate>mean_price_rate:
#                             group.loc[i,'process_price']=group.loc[i,'avg_predict_basep_price']
#                         else:
#
#                             group.loc[i, 'process_price'] = group.loc[i, 'saleprice']
#                     else:
#                         if qty>0:
#                             group.loc[i, 'process_price'] = group.loc[i, 'saleprice']*0.95
#                         else:
#                             group.loc[i, 'process_price']=group.loc[i, 'level_rate_price']*(1+0.15*n_p)
#                 else:
#                     if qty > 0:
#                         group.loc[i, 'process_price'] = group.loc[i, 'saleprice'] * 0.95
#                     else:
#                         group.loc[i, 'process_price'] = group.loc[i, 'level_rate_price'] * (1+0.1*n_p)
#                 group.loc[i,'flag']=1
#             elif level_sub not in ['S','A','B','C', 'D', 'E'] and predict_rate > 0.25:
#                 if i == 0:
#                     group.loc[i, 'process_price'] = group.loc[i, 'saleprice']
#                 elif predict_rate > saleprice_rate:
#                     if saleprice_rate < 0.1:
#                         if saleprice_rate > mean_price_rate:
#                             group.loc[i, 'process_price'] = group.loc[i, 'avg_predict_basep_price']
#                         else:
#
#                             group.loc[i, 'process_price'] = group.loc[i, 'saleprice']
#                     else:
#                         if qty > 0:
#                             group.loc[i, 'process_price'] = group.loc[i, 'saleprice'] * 0.95
#                         else:
#                             group.loc[i, 'process_price'] = group.loc[i, 'level_rate_price'] * (1+0.25*n_p)
#                 else:
#                     if qty > 0:
#                         group.loc[i, 'process_price'] = group.loc[i, 'saleprice'] * 0.95
#                     else:
#                         group.loc[i, 'process_price'] = group.loc[i, 'level_rate_price'] * (1+0.25*n_p)
#                 group.loc[i, 'flag'] = 1
#             # print('dsd')
#         #此部分保证S,A,B等级之间的防倒挂处理
#         group['process_price_f'] = group['process_price']
#         for i in range(size):
#             level_sub = group.loc[i, 'level_sub']
#             if i>0 and level_sub in ['S','A','B'] and group.loc[i, 'process_price_f'] < group.loc[i + 1, 'process_price_f'] :
#                 # if group.loc[i, 'process_price_f']>group.loc[i-1, 'process_price_f']:
#                 #     group.loc[i, 'process_price_f']=group.loc[i-1, 'process_price_f']-10
#
#                 if group.loc[i, 'process_price_f'] < group.loc[i - 1, 'process_price_f']:
#                     next=i+1
#                     # for next in range(i+1,size):
#                     #     if  level_sub not in ['S','A','B']:
#                     #         break
#                     if next<size-1:
#                         group.loc[i, 'process_price_f']=group.loc[next, 'process_price_f']+(group.loc[i-1, 'process_price_f']-group.loc[next, 'process_price_f'])/(next-i+1)
#                     else:
#                         group.loc[i, 'process_price_f'] = group.loc[i-1, 'process_price_f']*0.98
#         group['diff_rate']=group['process_price_f']/group['saleprice']-1
#         resDf=resDf.append(group)
#     return resDf
#
# def main():
#     result_df=process_template_level_rate_final()
#     df=predict_price_data(result_df)
#     df.to_csv('/data/sunpengwei/tmp/predict_price_level_rate.csv', encoding='utf-8-sig')
#     print('1')
#
# if __name__ == '__main__':
#     main()