#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   level_rate_config.py    
@Contact :   pengwei.sun@aihuishou.com
@License :   (C)Copyright aihuishou

@Modify Time      @Author       @Version    @Description
------------      -----------   --------    -----------
2021-01-10 16:48   pengwei.sun      1.0         None
'''

import datetime
import pandas as pd
import numpy as np
import os,sys
sys.path.append(os.getcwd())
from src.utils.config import logger
from src.mobile.android_levelrate.config_utils import s_min_max_range_fun, sale_num_price_fun
from src.utils.feishu_message import feishu_messager
from src.utils.db_processor import mysql_prediction_processor
from src.utils.util import get_today, check_date_str, format_date_string, format_date
from src.mobile.android_levelrate.model_template_level2_rate_v1 import process_android_template_level_rate_final
import time
import pickle
from src.mobile.android_levelrate.config_utils import level_rank_df,product_level_df,apple_product_ids
from src.mobile.android_levelrate.reverse.sku2_skulevel_period_price import get_period_price_fun
from src.mobile.android_levelrate.reverse.function_utils import save_pickle_data, load_pickle_data,check_conflict_file
from src.mobile.android_levelrate.reverse.sku2_common_variable import FILE_DIR
import concurrent.futures
from multiprocessing import Pool
cpu_worker_num=16
"""
取数逻辑 ：
按天汇总skuleve的价格：取平均数
 --b:筛选出具有S,A,B等级，并且等级数存在数大于1的sku
 -- and a.product_id  in (24347,24348,26464,26536,29261,29260,34576,35106,34575)
"""
SETTLE_DATA = """
select a.product_id,a.mapping_product_sku_id as product_sku_id,CONCAT(a.product_id,'_',a.mapping_product_sku_id) as product_sku,
a.mapping_product_level_id as product_level_id,a.mapping_product_level_name as product_level_name,substring(a.mapping_product_level_name,1,1) as level_sub,
avg(a.real_sell_price) as price,a.product_brand_id,
COUNT(*) AS sale_num,
count(distinct shop_out_date) as cnt_date
 from product_price_info_ljc  a
inner join (
	select product_id,mapping_product_sku_id,
	count(distinct mapping_product_level_id) AS cnt,
	count(distinct product_level_id_2) AS cnt1
	from
	(
	select distinct product_id,mapping_product_sku_id ,mapping_product_level_name ,
	mapping_product_level_id,
	case when substring(mapping_product_level_name,1,1) in ('S','A','B','C','D') 
	THEN mapping_product_level_id ELSE null end product_level_id_2 
	 from product_price_info_ljc 
	where  shop_out_date between DATE_FORMAT(date_sub(curdate(),interval 7 day),'%Y%m%d') 
	and DATE_FORMAT(date_sub(curdate(),interval 0 day),'%Y%m%d')  
	and product_category_id=1 and mapping_product_level_id>0
	and (sale_out_cnt is  null OR sale_out_cnt<=1)
	) a
	GROUP BY 1,2
	having  count(distinct product_level_id_2)>0 and count(distinct mapping_product_level_id)>=2
)b
on  a.product_id=b.product_id and a.mapping_product_sku_id=b.mapping_product_sku_id

where  a.shop_out_date>=DATE_FORMAT(date_sub(curdate(),interval 7 day),'%Y%m%d')  
and DATE_FORMAT(date_sub(curdate(),interval 0 day),'%Y%m%d') and a.product_category_id=1 
and  a.product_source_id in (101,103)
and (a.sale_out_cnt is  null OR a.sale_out_cnt<=1)
and a.mapping_product_level_name is not null
and a.document_item_id not in (SELECT mapping.document_item_id
    FROM mobile_anomaly_one_day_detection detection 
    JOIN mobile_anomaly_one_day_mapping mapping ON detection.lookup_key =  mapping.lookup_key
    WHERE (detection.audit_status IS NULL OR detection.audit_status = 0) AND detection.create_time >= date_sub(curdate(),interval 42 day) 
)
GROUP BY 1,2,3,4,5,6
order by 1,2,3,4
"""

SETTLE_DATA_2 = """
select a.product_id,a.mapping_product_sku_id as product_sku_id,CONCAT(a.product_id,'_',a.mapping_product_sku_id) as product_sku,
a.mapping_product_level_id as product_level_id,a.mapping_product_level_name as product_level_name,substring(a.mapping_product_level_name,1,1) as level_sub,
avg(a.real_sell_price) as price,a.product_brand_id,
COUNT(*) AS sale_num,
count(distinct shop_out_date) as cnt_date
 from product_price_info_ljc  a
inner join (
	select product_id,mapping_product_sku_id,
	count(distinct mapping_product_level_id) AS cnt,
	count(distinct product_level_id_2) AS cnt1
	from
	(
	select distinct product_id,mapping_product_sku_id ,mapping_product_level_name ,
	mapping_product_level_id,
	case when substring(mapping_product_level_name,1,1) in ('S','A','B','C','D') 
	THEN mapping_product_level_id ELSE null end product_level_id_2 
	 from product_price_info_ljc 
	where shop_out_date between DATE_FORMAT(date_sub(curdate(),interval 14 day),'%Y%m%d')  and DATE_FORMAT(date_sub(curdate(),interval 8 day),'%Y%m%d')  
	and product_category_id=1  and mapping_product_level_id>0
	and (sale_out_cnt is  null OR sale_out_cnt<=1)
	) a
	GROUP BY 1,2
	having  count(distinct product_level_id_2)>0 and count(distinct mapping_product_level_id)>=2
)b
on  a.product_id=b.product_id and a.mapping_product_sku_id=b.mapping_product_sku_id

where a.shop_out_date between DATE_FORMAT(date_sub(curdate(),interval 14 day),'%Y%m%d')  and DATE_FORMAT(date_sub(curdate(),interval 8 day),'%Y%m%d')  
 and a.product_category_id=1
 and  a.product_source_id in (101,103)
 and (a.sale_out_cnt is  null OR a.sale_out_cnt<=1)
and a.mapping_product_level_name is not null
and a.document_item_id not in (SELECT mapping.document_item_id
    FROM mobile_anomaly_one_day_detection detection 
    JOIN mobile_anomaly_one_day_mapping mapping ON detection.lookup_key =  mapping.lookup_key
    WHERE (detection.audit_status IS NULL OR detection.audit_status = 0) AND detection.create_time >= date_sub(curdate(),interval 42 day) 
)
GROUP BY 1,2,3,4,5,6
order by 1,2,3,4
"""

SETTLE_DATA_3 = """
select a.product_id,a.mapping_product_sku_id as product_sku_id,CONCAT(a.product_id,'_',a.mapping_product_sku_id) as product_sku,
a.mapping_product_level_id as product_level_id,a.mapping_product_level_name as product_level_name,substring(a.mapping_product_level_name,1,1) as level_sub,
avg(a.real_sell_price) as price,a.product_brand_id,
COUNT(*) AS sale_num,
count(distinct shop_out_date) as cnt_date
 from product_price_info_ljc  a
inner join (
	select product_id,mapping_product_sku_id,
	count(distinct mapping_product_level_id) AS cnt,
	count(distinct product_level_id_2) AS cnt1
	from
	(
	select distinct product_id,mapping_product_sku_id ,mapping_product_level_name ,
	mapping_product_level_id,
	case when substring(mapping_product_level_name,1,1) in ('S','A','B','C','D') 
	THEN mapping_product_level_id ELSE null end product_level_id_2 
	 from product_price_info_ljc 
	where shop_out_date between DATE_FORMAT(date_sub(curdate(),interval 21 day),'%Y%m%d')  and DATE_FORMAT(date_sub(curdate(),interval 15 day),'%Y%m%d')
	  and product_category_id=1  and mapping_product_level_id>0
	  and (sale_out_cnt is  null OR sale_out_cnt<=1)
	) a
	GROUP BY 1,2
	having  count(distinct product_level_id_2)>0 and count(distinct mapping_product_level_id)>=2
)b
on  a.product_id=b.product_id and a.mapping_product_sku_id=b.mapping_product_sku_id

where a.shop_out_date between DATE_FORMAT(date_sub(curdate(),interval 21 day),'%Y%m%d')  and DATE_FORMAT(date_sub(curdate(),interval 15 day),'%Y%m%d') 
 and a.product_category_id=1
 and  a.product_source_id in (101,103)
 and (a.sale_out_cnt is  null OR a.sale_out_cnt<=1)
and a.mapping_product_level_name is not null
and a.document_item_id not in (SELECT mapping.document_item_id
    FROM mobile_anomaly_one_day_detection detection 
    JOIN mobile_anomaly_one_day_mapping mapping ON detection.lookup_key =  mapping.lookup_key
    WHERE (detection.audit_status IS NULL OR detection.audit_status = 0) AND detection.create_time >= date_sub(curdate(),interval 42 day) 
)
GROUP BY 1,2,3,4,5,6
order by 1,2,3,4
"""

# --and a.product_id=32291  base prices for the S/A/B levels
# Reference base price for every level name, used as the denominator when
# converting absolute sell prices into level rates.
BASE_PRICE_DATA = """
SELECT level_name,price as base_price FROM c_level_base_price
"""
# Levels are pre-ranked.
# Joins the predicted prices with template/brand pricing info (b) and with
# 42-day observed sell-price aggregates (c) per (sku, level).
# NOTE(review): in the avg_sell_price CASE below, the second WHEN
# (`c.avg_sell_price is null and c.avg_sell_price>0`) is contradictory and
# can never be true — effectively only the first WHEN and the ELSE apply.
# Confirm the intended fallback before touching the SQL.
predict_price_sql = """
select a.*,substring(a.product_level_name,1,1) as level_sub,CONCAT(a.product_key,'_',a.product_sku_key) as product_sku,
CONCAT(b.level_template_id,'_',b.product_brand_id) as property_template_brand,
b.product_id,
b.rank,b.price_3,b.price_2,b.price_1,b.saleprice,b.qty,
a.forecast_reference_price as bi_price,
case when c.avg_sell_price>0 then  c.avg_sell_price when c.avg_sell_price is null and c.avg_sell_price>0 then c.avg_sell_price  else a.forecast_reference_price end as avg_sell_price,
case when c.min_sell_price is null then  0 else c.min_sell_price end as min_sell_price,
case when c.sale_num is null then  b.qty else c.sale_num end as sale_num
from  price_prediction a
inner join `imp_mysql_base_pricedata_category_phone`  b
on a.`product_sku_key`  =b.product_sku_id and a.product_level_key=b.product_level_id
left join (
        select a.product_id,a.mapping_product_sku_id,
        a.mapping_product_level_id as product_level_id,level.level_name as product_level_name,substring(level.level_name,1,1) as level_sub,
        avg(case when a.shop_out_date between DATE_FORMAT(date_sub(curdate(),interval 7 day),'%Y%m%d')  and DATE_FORMAT(date_sub(curdate(),interval 0 day),'%Y%m%d') then a.real_sell_price else null end) as avg_sell_price,
        min(a.real_sell_price) as min_sell_price,
        COUNT(*) AS sale_num,
        count(distinct shop_out_date) as cnt_date
         from product_price_info_ljc  a
        inner join (
            select product_id,mapping_product_sku_id,
            count(distinct product_level_id) AS cnt,
            count(distinct product_level_id_2) AS cnt1
            from
            (
            select distinct a.product_id,a.mapping_product_sku_id,b.level_name as product_level_name,
            a.mapping_product_level_id as product_level_id,
            case when substring(b.level_name,1,1) in ('S','A','B','C','D') 
            THEN product_level_id ELSE null end product_level_id_2 
             from product_price_info_ljc a
             inner join warehouse.dim_product_level b
             on a.mapping_product_level_id = b.level_id
            where a.shop_out_date>=DATE_FORMAT(date_sub(curdate(),interval 42 day),'%Y%m%d')  and a.product_category_id=1  and mapping_product_level_id>0
            and  (a.sale_out_cnt is  null OR a.sale_out_cnt<=1)
            ) a
            GROUP BY 1,2
            having  count(distinct product_level_id_2)>0 and count(distinct product_level_id)>=2
        )b
        on  a.product_id=b.product_id and a.mapping_product_sku_id=b.mapping_product_sku_id
        inner join warehouse.dim_product_level level
             on a.mapping_product_level_id = level.level_id
        where a.shop_out_date>=DATE_FORMAT(date_sub(curdate(),interval 42 day),'%Y%m%d') 
        and a.product_category_id=1  and (a.sale_out_cnt is  null OR a.sale_out_cnt<=1)
        GROUP BY 1,2,3,4,5

)c
on a.`product_sku_key`  =c.mapping_product_sku_id and a.product_level_key=c.product_level_id
"""
# and b.product_id=29693 38972 6262312,
# and a.product_sku_key in (6127503,6262878)

# 7-day average of previously published level rates per (product, level),
# keeping only positive mean rates.
product_avg_level_rate_sql = """
select product_id,level_id,avg(mean_rate) as mean_rate,
avg(template_mean_rate) as template_mean_rate
 from
product_sku2_level_rate_final_brand_android_v1 
where date>DATE_FORMAT(date_sub(curdate(),interval 7 day),'%Y%m%d')
and mean_rate>0
group by 1,2
having avg(mean_rate)>0
"""

# Parameterized insert for the final level-rate prices.
# 34 columns listed, matching the 34 `%s` placeholders below.
INSERT_LEVEL_RATE_PRICE_SQL = """
INSERT INTO price_prediction_level2_rate_price_brand_android_v1(date, product_sku_key, product_sku_name, product_level_key, 
product_level_name, product_key, product_name, product_category_id, product_category_name, product_brand_id,
product_brand_name, predict_origin, forecast_reference_price,is_new_product,POLY_pred_price,
rank,price_3,price_2,price_1,saleprice,qty,mean_rate,score,base_price,min_sale_price,sale_num,level_rate_price,avg_predict_basep_price,
predict_level_price_rate,saleprice_level_price_rate,mean_price_rate,process_price,flag,diff_rate)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,  %s,%s, %s, %s, %s, %s, %s, %s,  %s,%s, %s, %s, %s , %s,%s,
 %s, %s, %s, %s, %s, %s, %s, %s)
"""

class productLevelRate:
    """Derive per-product level price rates from recent settlement data.

    Pipeline:
      * ``fun1`` — pick, for every sku, an "anchor" (base) level among its
        S/A/B levels (highest score) and build per-product sku counts and
        sale totals;
      * ``fun2`` — express every level's price as a rate against the anchor
        level, aggregate the rates per (product, level) with sale-volume
        weights, then smooth the rate curve so it decreases with level rank.

    NOTE(review): this class depends on pandas behaviour that changed in
    later releases — ``SeriesGroupBy.agg({'name': 'func'})`` dict renaming
    (removed in pandas >= 0.25) and ``DataFrame.append`` (removed in
    pandas >= 2.0).  Confirm the pinned pandas version before upgrading.
    """

    def __init__(self, sql):
        # Load the sku-level aggregated settlement rows for each product
        # model; defaults to the last-7-days query.
        # sql = SETTLE_DATA
        if sql is None:
            self.query_sql = SETTLE_DATA
        else:
            self.query_sql = sql

        self.data = mysql_prediction_processor.load_sql(self.query_sql)
        # Reference base price for each level name.
        self.base_price_df = mysql_prediction_processor.load_sql(BASE_PRICE_DATA)

        # Static level-ordering table (imported from config_utils).
        self.level_rank_df = level_rank_df

        # Static (product, level) catalogue (imported from config_utils).
        self.product_level_df = product_level_df

    def fun1(self):
        """Select each sku's anchor level and per-product summary stats.

        Returns:
            tuple: ``(base_level, base_product_info)`` — the per-sku anchor
            rows (renamed price/score/sale_num columns, merged with base
            prices and sale-sum weights) and a per-product frame with the
            sku count and total sales.
        """
        # Score each (sku, level) by sale volume and grade: higher grades
        # have smaller level ids, so 1000 - level_id prefers the higher
        # grade when sale counts tie; sale_num dominates via the *1000 term.
        self.data['score'] = 1000 - self.data.product_level_id + self.data.sale_num * 1000
        # First keep only rows whose level starts with S, A or B.
        data_sku_base_level_df = self.data.loc[self.data.level_sub.isin(['S', 'A', 'B'])]

        # a= data_sku_base_level_df.groupby(by=['product_id','product_sku_id'], as_index=False)['score'].max()
        # index= data_sku_base_level_df.groupby(by=['product_id','product_sku_id'])['score'].idxmax()

        # For every sku keep the row with the highest score.
        base_level = data_sku_base_level_df.loc[
            data_sku_base_level_df.sort_values(['product_sku', 'score']).drop_duplicates('product_sku',
                                                                                         keep='last').index]

        base_level.rename(columns={'price': 'price_base', 'score': 'score_base', 'sale_num': 'base_sale_num'},
                          inplace=True)

        # base_level
        # Count the distinct skus per product that satisfied the S/A/B filter.
        base_product_sku_size = data_sku_base_level_df[['product_id', 'product_sku_id']].groupby('product_id').agg(
            {'product_sku_id': pd.Series.nunique}).reset_index()
        base_product_sku_size.rename(columns={'product_sku_id': 'product_sku_num'}, inplace=True)

        # Attach each anchor row's reference base price.
        base_level = base_level.merge(self.base_price_df, left_on=['product_level_name'], right_on=['level_name'])
        # NOTE(review): dict-renaming agg on a SeriesGroupBy — removed in
        # pandas >= 0.25; verify the pinned pandas version.
        base_base_level_sale_num = base_level.groupby(by=['product_id'])['base_sale_num'].agg(
            {'base_level_sale_sum': 'sum'})
        base_level = base_level.merge(base_base_level_sale_num, on='product_id')
        base_level['sku_weight'] = base_level.base_sale_num / base_level.base_level_sale_sum

        # Attach each row's anchor info (used here only to scope df_all for
        # the per-product sale totals below).
        df_all = self.data.merge(base_level[['product_sku', 'price_base', 'score_base', 'base_price']],
                                 on=['product_sku'])

        base_product_sale_num = df_all.groupby('product_id')['sale_num'].agg({'product_sale_sum': 'sum'}).reset_index()
        base_product_info = base_product_sku_size.merge(base_product_sale_num, left_on=['product_id'],
                                                        right_on=['product_id'])
        return base_level, base_product_info

    def fun2(self, base_level, base_product_info):
        """Aggregate level rates per product and smooth the rate curve.

        Args:
            base_level: anchor rows from :meth:`fun1`.
            base_product_info: per-product sku counts / sale sums from
                :meth:`fun1`.

        Returns:
            DataFrame: the static (product, level) catalogue left-joined
            with the smoothed ``rate_f`` values and level-rank columns.
        """
        # Attach each row's anchor (base-level) info.
        df_all = self.data.merge(base_level[['product_sku', 'price_base', 'score_base', 'base_price', 'sku_weight',
                                             'base_level_sale_sum', 'base_sale_num']],
                                 on=['product_sku'])

        # Compute the level rate and project it back onto the base price.
        df_all['level_rate'] = df_all.price / df_all.price_base
        df_all['to_base_price'] = df_all.base_price * df_all.level_rate
        df_all = df_all.loc[df_all.product_level_id > 0]
        df_all['product_level_id'] = df_all['product_level_id'].astype(int)
        # Aggregation key at the model level: "<product>_<level_id>_<level_name>".
        df_all['product_level_id_name'] = df_all['product_id'].astype(str) + '_' + df_all['product_level_id'].astype(
            str) + '_' + df_all['product_level_name']

        # Mean/max/min/median of the projected prices per (product, level).
        # NOTE(review): dict-renaming agg — removed in pandas >= 0.25.
        result1 = df_all.groupby(by='product_level_id_name')['to_base_price'].agg(
            {'price_mean': 'mean', 'price_max': 'max', 'price_min': 'min', 'price_media': 'median'}).reset_index()  # dict-renaming agg
        # Sale-count info for each level within a model.
        cnt_res = df_all.groupby(by='product_level_id_name')['sale_num'].agg(
            {'sale_sum': 'sum', 'sku_cnt': 'count'}).reset_index()  # dict-renaming agg

        df_all = df_all.merge(cnt_res, on='product_level_id_name')

        # When aggregating level prices, weight each row by its share of the
        # level's sales, combined with the sku's share of anchor-level sales.
        df_all['level_weight'] = df_all['sale_num'] / df_all['sale_sum']
        df_all['sku_base_level_weight'] = df_all['base_sale_num'] / df_all['base_level_sale_sum']
        df_all['zh_level_weight'] = df_all['sku_base_level_weight'] * df_all['level_weight']
        # df_all['zh_level_weight'](by='product_level_id_name').agg({'price_mean':'sum'})

        weight_res = df_all.groupby(by='product_level_id_name')['zh_level_weight'].agg(
            {'weight_sum': 'sum'}).reset_index()  # dict-renaming agg

        df_all = df_all.merge(weight_res[['product_level_id_name', 'weight_sum']], on=['product_level_id_name'])
        # Normalize the combined weights so they sum to 1 within each level.
        df_all['weight'] = df_all['zh_level_weight'] / df_all.weight_sum

        df_all['to_base_price_weight'] = df_all['weight'] * df_all['to_base_price']
        # Weighted prices are already normalized, so the group 'sum' IS the
        # weighted average (all four output columns hold the same sum).
        result = df_all.groupby(by='product_level_id_name')['to_base_price_weight'].agg(
            {'price_mean': 'sum', 'price_max': 'sum', 'price_min': 'sum', 'price_media': 'sum'}).reset_index()  # dict-renaming agg

        # resulttmp = result.merge(result1, on='product_level_id_name')

        # result=result.sort_values('price_media',ascending=False).reset_index()
        result['product_id'] = result['product_level_id_name'].str.split('_', expand=True)[0].astype(np.int64)
        result['level_id'] = result['product_level_id_name'].str.split('_', expand=True)[1].astype(np.int64)

        result=result.merge(base_product_info,how='inner',left_on=['product_id'],right_on=['product_id'])
        result=result.merge(self.level_rank_df,left_on=['level_id'],right_on=['product_level_id'])
        # Apple products use an alternative level ordering (rank_52).
        result.loc[result['product_id'].isin(apple_product_ids), 'product_level_order_rank'] = result.loc[
            result['product_id'].isin(apple_product_ids), 'product_level_order_rank_52']
        # result = result.merge(base_product_info, how='inner', left_on=['product_id'], right_on=['product_id'])
        # result = result.merge(self.level_rank_df, left_on=['level_id'], right_on=['product_level_id'])

        result_t = result.merge(cnt_res, on='product_level_id_name')

        # Normalized level rate (base prices are on a 10000 scale) and the
        # share of a product's skus observed at each level.
        result_t['rate'] = result_t.price_media / 10000

        result_t['sku_rate'] = result_t.sku_cnt / result_t.product_sku_num

        result_t = result_t.sort_values(by=['product_level_order_rank', 'product_level_id'], ascending=[True, True])
        result_t['rate_f'] = result_t['rate']
        result_t = result_t.reset_index()
        # result_t_tmp=result_t
        # size=result_t.shape[0]
        resDf = pd.DataFrame(columns=result_t.columns.tolist())
        grouped = result_t.groupby('product_id')
        # Smooth each product's rate curve from the best level downward.
        for name, group in grouped:

            group = group.sort_values(by=['product_level_order_rank', 'product_level_id'], ascending=[True, True])
            group.reset_index(drop=True, inplace=True)
            size = group.shape[0]
            for index in range(size):
                # NOTE: result_t is rebound to the group DataFrame, so all
                # .loc writes below mutate `group` in place.
                result_t = group
                if index == 0:
                    if result_t.loc[index, 'sku_rate'] > 0.3:
                        continue
                    # Find the first lower level with enough sku coverage.
                    # NOTE(review): `next` shadows the builtin; with size==1
                    # this loop never runs and `next` stays the builtin —
                    # the arithmetic below would then raise. Verify size>=2
                    # is guaranteed upstream.
                    for next in range(1, size, 1):
                        if result_t.loc[next, 'sku_rate'] > 0.3:
                            break
                    # e.g. s=1.34 a=0.9 yields next=4; special-case such
                    # large gaps by switching the per-step threshold, to
                    # avoid over-steep rate drops.
                    if (next - index) >= 2:
                        if (result_t.loc[index, 'rate_f'] - result_t.loc[next, 'rate_f']) / (next - index) > 0.05:
                            # for i in range(next-1,index,-1):
                            result_t.loc[index, 'rate_f'] = result_t.loc[next, 'rate_f'] + 0.05 * (next - index)
                    else:
                        if (result_t.loc[index, 'rate_f'] - result_t.loc[next, 'rate_f']) / (next - index) > 0.1:
                            # for i in range(next-1,index,-1):
                            result_t.loc[index, 'rate_f'] = result_t.loc[next, 'rate_f'] + 0.1 * (next - index)

                    # if result_t.loc[index,'rate_f']>1.2:
                    # result_t.loc[index, 'rate_f']=1.2
                    continue

                # if index==size-1:
                #     break
                # High grades (S/A/B): enforce a strictly decreasing curve.
                # (.find(...) > 0 is safe: the key starts with digits, so a
                # level letter can never sit at position 0.)
                if (result_t.loc[index, 'product_level_id_name'].find('S') > 0 \
                        or result_t.loc[index, 'product_level_id_name'].find('A') > 0 \
                        or result_t.loc[index, 'product_level_id_name'].find('B') > 0):

                    if result_t.loc[index, 'rate_f'] - result_t.loc[index - 1, 'rate_f'] >= 0 and result_t.loc[
                        index, 'sku_rate'] < 0.3:
                        result_t.loc[index, 'rate_f'] = result_t.loc[index - 1, 'rate_f'] - 0.01
                    if result_t.loc[index, 'rate_f'] - result_t.loc[index - 1, 'rate_f'] >= 0 and result_t.loc[
                        index, 'sku_rate'] >= 0.3:
                        # Walk upward to the nearest well-covered level and
                        # spread the gap linearly over the rows in between.
                        rate_flag = result_t.loc[index - 1, 'rate_f']
                        flag_index = index - 1
                        x = index - 1
                        for x in range(index - 1, 0, -1):
                            if result_t.loc[x, 'sku_rate'] >= 0.3:
                                rate_flag = result_t.loc[x, 'rate_f']
                                flag_index = x
                                break
                        if index - x > 1 and rate_flag - result_t.loc[index, 'rate_f'] > 0:
                            rate_bulk = (rate_flag - result_t.loc[index, 'rate_f']) / (index - x)
                            for bulk in range(flag_index + 1, index - 1, 1):
                                result_t.loc[bulk, 'rate_f'] = result_t.loc[bulk + 1, 'rate_f'] - rate_bulk
                    continue

                # Lower grades (C..K): same idea with a looser 0.2 coverage
                # threshold, and overshoots are damped instead of clamped.
                if (result_t.loc[index, 'product_level_id_name'].find('C') > 0 \
                        or result_t.loc[index, 'product_level_id_name'].find('D') > 0 \
                        or result_t.loc[index, 'product_level_id_name'].find('E') > 0 \
                        or result_t.loc[index, 'product_level_id_name'].find('F') > 0 \
                        or result_t.loc[index, 'product_level_id_name'].find('G') > 0 \
                        or result_t.loc[index, 'product_level_id_name'].find('H') > 0 \
                        or result_t.loc[index, 'product_level_id_name'].find('I') > 0 \
                        or result_t.loc[index, 'product_level_id_name'].find('J') > 0
                        or result_t.loc[index, 'product_level_id_name'].find('K') > 0):

                    if result_t.loc[index, 'rate_f'] - result_t.loc[index - 1, 'rate_f'] >= 0 and result_t.loc[
                        index, 'sku_rate'] < 0.2:
                        result_t.loc[index, 'rate_f'] = result_t.loc[index - 1, 'rate_f'] + (
                                result_t.loc[index, 'rate_f'] - result_t.loc[
                            index - 1, 'rate_f']) * 0.67  # damp the overshoot by 0.67 so the computed level rate does not grow too large

                    if result_t.loc[index, 'rate_f'] - result_t.loc[index - 1, 'rate_f'] >= 0 and result_t.loc[
                        index, 'sku_rate'] >= 0.2:
                        rate_flag = result_t.loc[index - 1, 'rate_f']
                        flag_index = index - 1
                        x = index - 1
                        for x in range(index - 1, 0, -1):
                            if result_t.loc[x, 'sku_rate'] >= 0.2:
                                rate_flag = result_t.loc[x, 'rate_f']
                                flag_index = x
                                break
                        if index - x > 1 and rate_flag - result_t.loc[index, 'rate_f'] > 0:
                            rate_bulk = (rate_flag - result_t.loc[index, 'rate_f']) / (index - x)
                            for bulk in range(flag_index + 1, index - 1, 1):
                                result_t.loc[bulk, 'rate_f'] = result_t.loc[bulk + 1, 'rate_f'] - rate_bulk
                    continue

                # if result_t.loc[index, 'rate_f']-result_t.loc[index-1,'rate_f']>0 \
                #     and result_t.loc[index, 'sku_rate']<0.3:
                #     result_t.loc[index, 'rate_f'] = result_t.loc[index - 1, 'rate_f'] - 0.02
            # NOTE(review): DataFrame.append was removed in pandas 2.0.
            resDf = resDf.append(group.copy())
        resDf['product_id'] = resDf.product_id.astype(np.int64)
        resDf['level_id'] = resDf.level_id.astype(np.int64)
        product_level_df = self.product_level_df.merge(resDf, how='left', left_on=['product_id', 'level_id'],
                                                       right_on=['product_id', 'level_id'])

        # NOTE(review): level_rank_df is merged twice and the frame sorted
        # twice below (hence the '_y' suffixed column); the write into
        # `result` here also looks like a copy-paste of the earlier Apple
        # override and does not affect product_level_df — confirm intent.
        product_level_df=product_level_df.merge(self.level_rank_df,left_on=['level_id'],right_on=['product_level_id'])
        result.loc[result['product_id'].isin(apple_product_ids), 'product_level_order_rank'] = result.loc[
            result['product_id'].isin(apple_product_ids), 'product_level_order_rank_52']
        product_level_df=product_level_df.sort_values(['product_id','product_level_order_rank_y'],ascending=True)
        product_level_df = product_level_df.merge(self.level_rank_df, left_on=['level_id'],
                                                  right_on=['product_level_id'])
        product_level_df = product_level_df.sort_values(['product_id', 'product_level_order_rank_y'], ascending=True)

        # print(1)
        return product_level_df


def fun3(process_df, rate_f='mean_rate', w='w_mean'):
    """Fill in missing level rates (NaN) per ``product_id`` group.

    Missing rates are encoded as -1 after ``fillna``.  The first row of a
    group is extrapolated upward from the next known rate (+0.03 per step);
    later rows are interpolated down from the previous row towards the next
    reliable rate (one with weight ``w`` >= 0.5), or decayed when none exists.

    Args:
        process_df: frame with at least ``product_id``, ``rate_f`` and ``w``
            columns (rows are assumed pre-sorted within each product).
        rate_f: name of the rate column to fill.
        w: name of the weight column used to judge reliability.

    Returns:
        DataFrame with every ``rate_f`` value filled, groups concatenated in
        product_id order.
    """
    process_df = process_df.reset_index()
    process_df[rate_f] = process_df[rate_f].fillna(-1)

    filled_groups = []
    for _, group in process_df.groupby('product_id'):
        size = group.shape[0]
        group.reset_index(drop=True, inplace=True)
        for i in range(size):
            if group.loc[i, rate_f] != -1:
                continue
            if i == 0:
                # Locate the first known rate below the top row.
                nxt = 0
                for nxt in range(1, size):
                    if group.loc[nxt, rate_f] > 0:
                        break
                if nxt == size - 1 and group.loc[nxt, rate_f] == -1:
                    # The whole group is unknown: default the top rate to 1.
                    group.loc[i, rate_f] = 1
                else:
                    group.loc[i, rate_f] = group.loc[nxt, rate_f] + nxt * 0.03
            else:
                # Locate the next reliable known rate (falls back to the
                # last index, or stays == size when i is the last row).
                nxt = size
                for nxt in range(i + 1, size):
                    if group.loc[nxt, rate_f] > 0 and group.loc[nxt, w] >= 0.5:
                        break
                if nxt < size - 1:
                    # Linear interpolation between previous and next rates.
                    group.loc[i, rate_f] = group.loc[i - 1, rate_f] - (
                            group.loc[i - 1, rate_f] - group.loc[nxt, rate_f]) / (nxt - i + 1)
                else:
                    # No reliable rate below: decay from the previous row.
                    varience = (group.loc[i - 1, rate_f] - 0.1) / (nxt - i + 1)
                    group.loc[i, rate_f] = group.loc[i - 1, rate_f] - 0.05 - varience
        filled_groups.append(group.copy())

    # pd.concat replaces DataFrame.append (removed in pandas >= 2.0).
    if filled_groups:
        return pd.concat(filled_groups)
    return pd.DataFrame(columns=process_df.columns.tolist())


def fun33(process_df, rate_f='mean_rate', w='w_mean', template_rate='template_mean_rate'):
    """Fill missing level rates per product via ``fun33_fun``, fanning out
    over a process pool when there are more than 5 product groups.

    Args:
        process_df: frame with at least ``product_id`` and ``rate_f``.
        rate_f: rate column to fill (missing values become -1).
        w: reliability-weight column name (consumed inside ``fun33_fun``).
        template_rate: template-rate fallback column (consumed inside
            ``fun33_fun``).

    Returns:
        DataFrame of all processed groups, de-duplicated.
    """
    process_df = process_df.reset_index()
    process_df[rate_f] = process_df[rate_f].fillna(-1)

    resDf = pd.DataFrame(columns=process_df.columns.tolist())

    # resDf = pd.DataFrame(columns=price_df.columns.tolist())
    # price_df = price_df.loc[price_df.product_key.isin([32290])]
    grouped = process_df.groupby(['product_id'])

    total_gp = grouped.ngroups
    # NOTE(review): `iter` shadows the builtin; used only as a progress counter.
    iter=0
    t1 = time.time()
    if total_gp>5:
        # Parallel path: Pool.map runs fun33_fun on every (name, frame)
        # group tuple; zip re-pairs results with their group keys for
        # logging.  NOTE(review): Pool.map materialises all results before
        # zip yields — presumably acceptable for this data size; verify.
        with Pool(cpu_worker_num)  as executor:
            for number, group in zip(grouped,
                                      executor.map(fun33_fun,
                                                   grouped,)):
                # NOTE(review): DataFrame.append was removed in pandas 2.0.
                resDf = resDf.append(group.copy())
                iter += 1
                logger.info('total_gp = {} ,iter={} productid={} is prime:'.format(total_gp, iter, number[0]))
        logger.info(' fun33 end use time @{}'.format((time.time() - t1) / 60))
    else:
        # Serial path for small workloads.
        for group in grouped:
            group = fun33_fun(group)
            resDf = resDf.append(group.copy())
    resDf = resDf.drop_duplicates()

    # grouped = process_df.groupby('product_id')
    # for name, group in grouped:
    #     size = group.shape[0]
    #     group.reset_index(drop=True, inplace=True)
    #     for i in range(size):
    #         if group.loc[i, rate_f] == -1:
    #             if i == 0:
    #
    #                 for next in range(i + 1, size, 1):
    #                     if group.loc[next, rate_f] > 0:
    #                         break
    #                 tem_rate = group.loc[i, template_rate]
    #                 if tem_rate > group.loc[next, rate_f]:
    #                     group.loc[i, rate_f] = group.loc[i, template_rate]
    #                 else:
    #                     group.loc[i, rate_f] = group.loc[next, rate_f] + next * 0.03
    #                 continue
    #             if i > 0:
    #                 next = size
    #                 for next in range(i + 1, size, 1):
    #                     if group.loc[next, rate_f] > 0 and group.loc[next, w] >= 0.5:
    #                         break
    #                 if next < size - 1:
    #                     tem_rate = group.loc[i, template_rate]
    #                     if tem_rate > group.loc[next, rate_f] and tem_rate <= group.loc[i - 1, rate_f]:
    #                         group.loc[i, rate_f] = group.loc[i, template_rate]
    #                     else:
    #                         group.loc[i, rate_f] = group.loc[i - 1, rate_f] - (
    #                                 group.loc[i - 1, rate_f] - group.loc[next, rate_f]) / (next - i + 1)
    #
    #                 else:
    #                     tem_rate = group.loc[i, template_rate]
    #                     if tem_rate < group.loc[i - 1, rate_f]:
    #                         group.loc[i, rate_f] = group.loc[i, template_rate]
    #                     else:
    #                         # varience = (group.loc[i - 1, rate_f] - 0.1) / (next - i + 1)
    #                         # group.loc[i, rate_f] = group.loc[i - 1, rate_f]-0.05-varience
    #                         rate = min(max(0.8, group.loc[i, template_rate] / group.loc[i - 1, template_rate]), 0.99)
    #                         group.loc[i, rate_f] = group.loc[i - 1, rate_f] * rate
    #                 continue
    #         else:
    #             continue
    #     resDf = resDf.append(group.copy())
    print(1)
    return resDf

def fun33_fun(group):
    """Fill missing (-1) ``mean_rate`` values for one product's level group.

    Fixes over the original version: removed a leftover debug ``print`` on a
    hard-coded ``template_brand`` list, removed large slabs of dead
    commented-out code, and renamed the loop cursor ``next`` which shadowed
    the builtin.  The interpolation logic itself is unchanged.

    :param group: ``(key, DataFrame)`` pair as produced by
        ``DataFrame.groupby``; only the frame is used.  Rows are presumably
        ordered best-to-worst level (TODO confirm against the caller) and must
        carry ``mean_rate`` (-1 when missing), ``w_mean`` (confidence weight)
        and ``template_mean_rate`` (template fallback rate).
    :return: the frame with every ``mean_rate == -1`` replaced by an
        interpolated or template-derived value; groups of size <= 1 are
        returned untouched.
    """
    rate_f = 'mean_rate'
    w = 'w_mean'
    template_rate = 'template_mean_rate'
    group = group[1]  # unpack the (key, frame) pair from groupby

    size = group.shape[0]
    group.reset_index(drop=True, inplace=True)
    if size <= 1:
        # nothing to interpolate against
        return group
    for i in range(size):
        if group.loc[i, rate_f] != -1:
            continue  # rate already known, keep it

        if i == 0:
            # First row missing: scan downwards for the first positive rate.
            nxt = 0
            for nxt in range(i + 1, size, 1):
                if group.loc[nxt, rate_f] > 0:
                    break
            tem_rate = group.loc[i, template_rate]
            if tem_rate > group.loc[nxt, rate_f]:
                # template already sits above the next known rate -> trust it
                group.loc[i, rate_f] = group.loc[i, template_rate]
            else:
                # otherwise extrapolate upwards from the next known rate
                group.loc[i, rate_f] = group.loc[nxt, rate_f] + nxt * 0.03
            continue

        # i > 0: scan downwards for a trustworthy anchor
        # (positive rate with weight >= 0.5).
        nxt = size
        for nxt in range(i + 1, size, 1):
            if group.loc[nxt, rate_f] > 0 and group.loc[nxt, w] >= 0.5:
                break
        if nxt < size - 1:
            # A reliable anchor exists below: take the template rate when it
            # fits monotonically between the neighbours, else interpolate
            # linearly between the previous row and the anchor.
            tem_rate = group.loc[i, template_rate]
            if tem_rate > group.loc[nxt, rate_f] and tem_rate <= group.loc[i - 1, rate_f]:
                group.loc[i, rate_f] = group.loc[i, template_rate]
            else:
                group.loc[i, rate_f] = group.loc[i - 1, rate_f] - (
                        group.loc[i - 1, rate_f] - group.loc[nxt, rate_f]) / (nxt - i + 1)
        else:
            # No reliable anchor below: step down from the previous row.
            tem_rate = group.loc[i, template_rate]
            if tem_rate < group.loc[i - 1, rate_f]:
                group.loc[i, rate_f] = group.loc[i, template_rate]
            else:
                # decay the previous rate by the template ratio,
                # clamped into [0.8, 0.99]
                rate = min(max(0.8, group.loc[i, template_rate] / group.loc[i - 1, template_rate]), 0.99)
                group.loc[i, rate_f] = group.loc[i - 1, rate_f] * rate
    return group

def save_complement_data(predict_data):
    """Persist the complemented prediction data into MySQL.

    Stamps each row with tomorrow's date, rounds the rate/price columns to 4
    decimals, normalises NaN/inf values to -1, clears the 10-day-old and the
    next-day partitions of
    ``price_prediction_level2_rate_price_brand_android_v1`` and bulk-inserts
    the rows.

    Fixes over the original version: ``saleprice_level_price_rate`` was
    rounded twice, and the inf reset used chained indexing
    (``df['diff_rate'][mask] = -1``) which pandas may apply to a copy rather
    than the frame — replaced with ``.loc``.

    :param predict_data: frame with one row per sku/level; must contain every
        column referenced in the INSERT statement below.  ``None`` is ignored.
    :return: number of rows written, or ``None`` when *predict_data* is None.
    """
    if predict_data is None:
        return None
    logger.info('saving predict data...')
    # tomorrow = the partition date being written
    dt = datetime.datetime.strptime(time.strftime('%Y-%m-%d'), '%Y-%m-%d') + datetime.timedelta(days=1)
    predict_data['date'] = dt.strftime('%Y-%m-%d')
    predict_data = predict_data.fillna(-1)
    # round every rate/price column once (the original rounded
    # 'saleprice_level_price_rate' twice)
    for col in ('predict_level_price_rate', 'saleprice_level_price_rate',
                'mean_price_rate', 'diff_rate', 'process_price'):
        predict_data[col] = predict_data[col].apply(lambda x: round(x, 4))
    predict_data['POLY_pred_price'] = predict_data['process_price'].apply(lambda x: round(x, 0))
    # bugfix: use .loc so infinite diff rates are reliably reset on the frame
    predict_data.loc[np.isinf(predict_data['diff_rate']), 'diff_rate'] = -1
    predict_data = predict_data.fillna(-1)
    predict_data = predict_data.replace([np.inf, -np.inf], -1)
    delete_rate_sql = """
        delete from price_prediction_level2_rate_price_brand_android_v1 where date=DATE_FORMAT(date_sub(curdate(),interval 10 day),'%Y%m%d') 
        """
    mysql_prediction_processor.execute_sql(delete_rate_sql)
    delete_td_rate_sql = """
                delete from price_prediction_level2_rate_price_brand_android_v1 where date=DATE_FORMAT(date_add(curdate(),interval 1 day),'%Y%m%d') 
                """
    mysql_prediction_processor.execute_sql(delete_td_rate_sql)
    insert_sql = """
        INSERT INTO price_prediction_level2_rate_price_brand_android_v1(date, product_sku_key, product_sku_name, product_level_key, 
        product_level_name, product_key, product_name, product_category_id, product_category_name, product_brand_id,
        product_brand_name, predict_origin, forecast_reference_price,is_new_product,POLY_pred_price,
        rank,price_3,price_2,price_1,saleprice,qty,mean_rate,score,base_price,min_sale_price,sale_num,level_rate_price,avg_predict_basep_price,
        predict_level_price_rate,saleprice_level_price_rate,mean_price_rate,process_price,flag,diff_rate)
        VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,  %s,%s, %s, %s, %s, %s, %s, %s,  %s,%s, %s, %s, %s , %s,%s,
         %s, %s, %s, %s, %s, %s, %s, %s)
        """
    # column order must mirror the INSERT column list above (note the SQL
    # column min_sale_price is fed by the frame column min_sell_price)
    insert_columns = ['date', 'product_sku_key', 'product_sku_name',
                      'product_level_key',
                      'product_level_name', 'product_key', 'product_name',
                      'product_category_id', 'product_category_name',
                      'product_brand_id',
                      'product_brand_name', 'predict_origin',
                      'forecast_reference_price', 'is_new_product',
                      'POLY_pred_price',
                      'rank', 'price_3', 'price_2', 'price_1', 'saleprice', 'qty',
                      'mean_rate', 'score',
                      'base_price', 'min_sell_price', 'sale_num',
                      'level_rate_price', 'avg_predict_basep_price',
                      'predict_level_price_rate', 'saleprice_level_price_rate',
                      'mean_price_rate',
                      'process_price', 'flag', 'diff_rate']
    mysql_prediction_processor.execute_insert_sql(
        insert_sql,
        predict_data[insert_columns].to_records(index=False).tolist())
    logger.info('saving predict data to mysql done')
    mysql_price_num = predict_data.shape[0]
    # save_data_to_gp(predict_data)
    return mysql_price_num


def save_product_level_rate(product_level_rate):
    """Persist the final product level-rate table into MySQL.

    Stamps every row with tomorrow's date, rounds each rate column to five
    decimals, clears the 10-day-old and next-day partitions of
    ``product_sku2_level_rate_final_brand_android_v1`` and bulk-inserts the
    rows.

    :param product_level_rate: frame carrying one row per (product, level)
        with the rate columns listed below.  ``None`` is ignored.
    :return: number of rows written, or ``None`` when the input is None.
    """
    if product_level_rate is None:
        return None
    logger.info('saving predict data...')
    # tomorrow = the partition date being written
    tomorrow = datetime.datetime.strptime(time.strftime('%Y-%m-%d'), '%Y-%m-%d') + datetime.timedelta(days=1)
    product_level_rate['date'] = tomorrow.strftime('%Y-%m-%d')
    product_level_rate = product_level_rate.fillna(-1)
    # round every rate column to 5 decimals
    for rate_col in ('rate_f1', 'rate_f2', 'rate_f3',
                     'sku_rate1', 'sku_rate2', 'sku_rate3',
                     'mean_rate', 'template_mean_rate', 'mean_rate_tmp'):
        product_level_rate[rate_col] = product_level_rate[rate_col].apply(lambda x: round(x, 5))

    delete_rate_sql = """
        delete from product_sku2_level_rate_final_brand_android_v1 where date=DATE_FORMAT(date_sub(curdate(),interval 10 day),'%Y%m%d')
        """
    mysql_prediction_processor.execute_sql(delete_rate_sql)
    delete_td_rate_sql = """
                delete from product_sku2_level_rate_final_brand_android_v1 where date=DATE_FORMAT(date_add(curdate(),interval 1 day),'%Y%m%d') 
                """
    mysql_prediction_processor.execute_sql(delete_td_rate_sql)
    insert_sql = """
        INSERT INTO product_sku2_level_rate_final_brand_android_v1(date, template_brand, product_id, level_id, level_name,
        product_name, sku_rate1, rate_f1, sku_rate2, rate_f2, sku_rate3,
        rate_f3, w_f1, w_f2,w_f3,mean_rate,
        w_mean,template_mean_rate,mean_rate_tmp)
        VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,  %s,%s, %s, %s, %s, %s, %s, %s)
        """

    # column order mirrors the INSERT column list above
    insert_columns = ['date', 'template_brand', 'product_id', 'level_id',
                      'product_level_name', 'product_name',
                      'sku_rate1', 'rate_f1', 'sku_rate2', 'rate_f2', 'sku_rate3',
                      'rate_f3',
                      'w_f1', 'w_f2', 'w_f3', 'mean_rate', 'w_mean',
                      'template_mean_rate', 'mean_rate_tmp']
    mysql_prediction_processor.execute_insert_sql(
        insert_sql,
        product_level_rate[insert_columns].to_records(index=False).tolist())
    logger.info('saving predict data to mysql done')
    return product_level_rate.shape[0]


def merge_avg_product_level_rate(product_level_rate):
    """Join today's level rates with the stored average level rates.

    Loads the averaged rate table via ``product_avg_level_rate_sql``, renames
    today's rate columns with a ``_td`` suffix so they do not collide, aligns
    the join keys on both sides to int64 and returns the inner-merged frame
    with NaNs replaced by -1.

    :param product_level_rate: today's (product_id, level_id) rate frame;
        renamed in place.
    :return: merged frame containing both today's and the averaged rates.
    """
    avg_rate_df = mysql_prediction_processor.load_sql(product_avg_level_rate_sql)
    product_level_rate.rename(
        columns={'mean_rate': 'mean_rate_td', 'template_mean_rate': 'template_mean_rate_td'},
        inplace=True)
    # normalise the join keys on both sides to int64 so the merge matches
    for key_col in ('product_id', 'level_id'):
        avg_rate_df[key_col] = avg_rate_df[key_col].astype(np.int64)
        product_level_rate[key_col] = product_level_rate[key_col].astype(np.int64)

    merged = product_level_rate.merge(avg_rate_df, on=['product_id', 'level_id'])
    return merged.fillna(-1)


def process_product_level_rate(sql):
    """Build the (product_id, level_id) rate frame for one settlement query.

    Runs the two-step ``productLevelRate`` pipeline (``fun1`` then ``fun2``)
    on *sql* and drops duplicate (product_id, level_id) rows, keeping the
    first occurrence.

    :param sql: settlement SELECT statement fed to the rate model.
    :return: de-duplicated level-rate DataFrame.
    """
    rate_model = productLevelRate(sql)
    level_base, product_info = rate_model.fun1()
    level_rate_df = rate_model.fun2(level_base, product_info)
    level_rate_df.drop_duplicates(subset=['product_id', 'level_id'], keep='first', inplace=True)
    return level_rate_df


def process_level_rate_final():
    """Compute the final per-(product, level) mean-rate table.

    Derives level rates from three settlement queries (SETTLE_DATA,
    SETTLE_DATA_2, SETTLE_DATA_3 — presumably three different look-back
    windows, TODO confirm), blends them with fixed weights 0.5/0.3/0.2,
    merges in the brand-template rates, lets template rates fully replace the
    blend for "not hot" products, fills the remaining missing rates via
    ``fun33``, joins the level rank table and writes a CSV snapshot.

    :return: DataFrame keyed by (product_id, level_id) carrying ``mean_rate``
        plus the intermediate per-window rate columns.
    """
    product_level_df1 = process_product_level_rate(SETTLE_DATA)
    product_level_df2 = process_product_level_rate(SETTLE_DATA_2)
    product_level_df3 = process_product_level_rate(SETTLE_DATA_3)

    # keep only the join keys + each window's rate columns, suffixed 1/2/3
    # NOTE(review): rename on a column-selection slice triggers
    # SettingWithCopyWarning but works because the slice is reassigned here
    tmp1 = product_level_df1[['template_brand', 'product_id', 'level_id', 'product_name', 'sku_rate', 'rate_f']]
    tmp1.rename(columns={'rate_f': 'rate_f1', 'sku_rate': 'sku_rate1'}, inplace=True)
    tmp2 = product_level_df2[['product_id', 'level_id', 'sku_rate', 'rate_f']]
    tmp2.rename(columns={'rate_f': 'rate_f2', 'sku_rate': 'sku_rate2'}, inplace=True)

    tmp3 = product_level_df3[['product_id', 'level_id', 'sku_rate', 'rate_f']]
    tmp3.rename(columns={'rate_f': 'rate_f3', 'sku_rate': 'sku_rate3'}, inplace=True)

    # inner-join the three windows on (product_id, level_id)
    tmp1 = tmp1.merge(tmp2, left_on=['product_id', 'level_id'], right_on=['product_id', 'level_id'])
    tmp1 = tmp1.merge(tmp3, on=['product_id', 'level_id'])
    # tmp1['mean_rate']=tmp1[['rate_f1','rate_f2','rate_f3']].mean(axis=1)

    # a window only contributes its weight where it produced a positive rate;
    # rows failing the condition keep NaN so sum() skips them below
    tmp1.loc[tmp1.rate_f1 > 0, 'w_f1'] = 0.5
    tmp1.loc[tmp1.rate_f2 > 0, 'w_f2'] = 0.3
    tmp1.loc[tmp1.rate_f3 > 0, 'w_f3'] = 0.2

    # weighted mean over the windows that contributed
    tmp1['rate_f1_w'] = tmp1.w_f1 * tmp1.rate_f1
    tmp1['rate_f2_w'] = tmp1.w_f2 * tmp1.rate_f2
    tmp1['rate_f3_w'] = tmp1.w_f3 * tmp1.rate_f3
    tmp1['mean_rate'] = tmp1[['rate_f1_w', 'rate_f2_w', 'rate_f3_w']].sum(axis=1) / tmp1[['w_f1', 'w_f2', 'w_f3']].sum(
        axis=1)
    tmp1['w_mean'] = tmp1[['w_f1', 'w_f2', 'w_f3']].sum(axis=1)
    # bring in the brand-template rates and the list of non-hot products
    template_level_rate,not_hot_product = process_android_template_level_rate_final()
    template_level_rate.rename(columns={'mean_rate': 'template_mean_rate'}, inplace=True)
    template_level_rate.level_id = template_level_rate['level_id'].astype(np.int64)
    level_rate_merge_df = pd.merge(tmp1, template_level_rate[['template_brand', 'level_id', 'template_mean_rate','w_mean_template']],
                                   left_on=['template_brand', 'level_id'], right_on=['template_brand', 'level_id'])

    # non-hot products: use the template weight and rate verbatim
    level_rate_merge_df.loc[level_rate_merge_df.product_id.isin(not_hot_product),'w_mean']=level_rate_merge_df.loc[level_rate_merge_df.product_id.isin(not_hot_product),'w_mean_template']
    level_rate_merge_df.loc[level_rate_merge_df.product_id.isin(not_hot_product),'mean_rate']=level_rate_merge_df.loc[level_rate_merge_df.product_id.isin(not_hot_product),'template_mean_rate']

    # keep a pre-fill copy, then mark missing rates as -1 for fun33 to fill
    level_rate_merge_df['mean_rate_tmp'] = level_rate_merge_df.mean_rate
    level_rate_merge_df['mean_rate'] = level_rate_merge_df['mean_rate'].fillna(-1)
    # product_lists = level_rate_merge_df.loc[level_rate_merge_df.mean_rate == -1, 'product_id'].drop_duplicates()[:100].values.tolist()
    # level_rate_merge_df=level_rate_merge_df.loc[level_rate_merge_df.product_id.isin(product_lists)]
    result_df = fun33(level_rate_merge_df)
    # result_df_tmp=fun33(level_rate_merge_df)

    result_df[['level_id']] = result_df[['level_id']].apply(
        np.int64)
    result_df = result_df.merge(level_rank_df, left_on='level_id', right_on='product_level_id')
    # debug snapshot; NOTE(review): hard-coded per-user path
    result_df.to_csv('/data/sunpengwei/tmp/product_level_rate_android.csv', encoding='utf-8-sig')
    return result_df


def process_level_rate_price_fun(group):
    """Adjust the predicted prices of one (product_id, sku) level group.

    The group is one slice of the frame built by ``predict_price_data``.
    In order: skip groups with any non-positive ``mean_rate``; sort levels
    best-to-worst; fix S/A+ price inversions; replace ``process_price`` where
    the prediction deviates beyond a per-level-letter threshold; enforce
    monotonically decreasing ``process_price_f`` down the levels; finally cap
    the S-level price relative to A+.

    :param group: ``(key, DataFrame)`` pair from ``groupby``.
    :return: the adjusted group frame.
    """
    group = group[1]  # unpack the (key, frame) pair from groupby
    size = group.shape[0]
    # logger.info('process_level_rate_price_fun is run size={}'.format(size))
    if group.mean_rate.min() <= 0:
        # at least one level lacks a usable rate -> leave the group untouched
        return group

    # order levels best-to-worst; ties broken by rank/level_id/rate
    group = group.sort_values(['product_level_order_rank', 'rank', 'level_id', 'mean_rate', ],
                              ascending=[True, True, True, False])

    # group['levelname']=group.product_level_name
    group.reset_index(drop=True, inplace=True)
    # Fix S vs A+ price inversion (2021-08-20: min_sell_price logic removed)
    if group.loc[0, 'product_level_name'] == 'S':
        flag_a = group.loc[group.product_level_name == 'A+']
        if not flag_a.empty:
            a_inndex = flag_a.index[0]
            if flag_a.loc[a_inndex, 'sale_num'] > 0:
                # walk upwards from A+ and rescale any level priced below its
                # inferior neighbour using the ratio of their mean rates
                for idex in range(a_inndex - 1, -1, -1):
                    if group.loc[idex, 'level_rate_price'] < group.loc[
                        idex + 1, 'level_rate_price']:  # level_rate_price was replaced by the 30-day minimum sell price; now by the price close to the latest 4 price bands
                        process_price = group.loc[idex + 1, 'level_rate_price'] / group.loc[idex + 1, 'mean_rate'] * \
                                        group.loc[idex, 'mean_rate']
                        group.loc[idex, 'level_rate_price'] = process_price
                        group.loc[idex, 'process_price'] = process_price

    # Pass 1: per-level deviation repair.  Threshold and correction step grow
    # with worse level letters (S/A/B: 3%, C/D/E: 6%, others: 12%).
    for i in range(size):
        predict_rate = group.loc[i, 'predict_level_price_rate']
        saleprice_rate = group.loc[i, 'saleprice_level_price_rate']
        mean_price_rate = group.loc[i, 'mean_price_rate']
        level_sub = group.loc[i, 'level_sub']
        # direction of the correction: -1 when prediction is below the
        # level-rate price, +1 otherwise
        n_p = 1
        if group.loc[i, 'forecast_reference_price'] - group.loc[i, 'level_rate_price'] < 0:
            n_p = -1

        # qty = group.loc[i, 'qty']
        if level_sub in ['S', 'A', 'B'] and predict_rate > 0.03:
            if i == 0:  # first level beyond the anomaly range: take the manual price if it exceeds the next level's prediction, else the next level's prediction * 1.05 (branch currently a no-op)
                pass
            elif predict_rate > saleprice_rate:
                if saleprice_rate < 0.03:
                    if saleprice_rate > mean_price_rate:
                        group.loc[i, 'process_price'] = group.loc[i, 'avg_predict_basep_price']
                    else:

                        group.loc[i, 'process_price'] = group.loc[i, 'saleprice']
                else:
                    # if qty>0:  # sale qty > 0 means the base price is trustworthy
                    #     group.loc[i, 'process_price'] = group.loc[i, 'saleprice']*0.95
                    # else:
                    group.loc[i, 'process_price'] = group.loc[i, 'level_rate_price'] * (1 + 0.03 * n_p)
            else:
                # if qty > 0:
                #     group.loc[i, 'process_price'] = group.loc[i, 'saleprice'] * 0.95
                # else:
                group.loc[i, 'process_price'] = group.loc[i, 'level_rate_price'] * (1 + 0.03 * n_p)
            group.loc[i, 'flag'] = 1

        elif level_sub in ['C', 'D', 'E'] and predict_rate > 0.06:
            if i == 0:
                group.loc[i, 'process_price'] = group.loc[i, 'saleprice']
            elif predict_rate > saleprice_rate:
                if saleprice_rate < 0.06:
                    if saleprice_rate > mean_price_rate:
                        group.loc[i, 'process_price'] = group.loc[i, 'avg_predict_basep_price']
                    else:

                        group.loc[i, 'process_price'] = group.loc[i, 'saleprice']
                else:
                    # if qty>0:
                    #     group.loc[i, 'process_price'] = group.loc[i, 'saleprice']*0.95
                    # else:
                    group.loc[i, 'process_price'] = group.loc[i, 'level_rate_price'] * (1 + 0.05 * n_p)
            else:
                # if qty > 0:
                #     group.loc[i, 'process_price'] = group.loc[i, 'saleprice'] * 0.95
                # else:
                group.loc[i, 'process_price'] = group.loc[i, 'level_rate_price'] * (1 + 0.05 * n_p)
            group.loc[i, 'flag'] = 1
        elif level_sub not in ['S', 'A', 'B', 'C', 'D', 'E'] and predict_rate > 0.12:
            if i == 0:
                group.loc[i, 'process_price'] = group.loc[i, 'saleprice']
            elif predict_rate > saleprice_rate:
                if saleprice_rate < 0.1:
                    if saleprice_rate > mean_price_rate:
                        group.loc[i, 'process_price'] = group.loc[i, 'avg_predict_basep_price']
                    else:

                        group.loc[i, 'process_price'] = group.loc[i, 'saleprice']
                else:
                    # if qty > 0:
                    #     group.loc[i, 'process_price'] = group.loc[i, 'saleprice'] * 0.95
                    # else:
                    group.loc[i, 'process_price'] = group.loc[i, 'level_rate_price'] * (1 + 0.07 * n_p)
            else:
                # if qty > 0:
                #     group.loc[i, 'process_price'] = group.loc[i, 'saleprice'] * 0.95
                # else:
                group.loc[i, 'process_price'] = group.loc[i, 'level_rate_price'] * (1 + 0.07 * n_p)
            group.loc[i, 'flag'] = 1
        # print('dsd')
    # Pass 2: enforce monotonically decreasing prices down the levels
    group['process_price_f'] = group['process_price']
    # logger.info('group product_sku_key:={}'.format(group.loc[0, 'product_sku_key']))
    for i in range(size):
        level_sub = group.loc[i, 'level_sub']

        if i > 0 and i == size - 1:  # bugfix: index-out-of-range at the final step when levels sort as J,B
            if group.loc[i, 'process_price_f'] > group.loc[i - 1, 'process_price_f']:
                group.loc[i, 'process_price_f'] = group.loc[i - 1, 'process_price_f'] * 0.98
            continue

        if i > 0 and level_sub in ['S', 'A', 'B'] and group.loc[i, 'process_price_f'] < group.loc[
            i + 1, 'process_price_f']:
            # if group.loc[i, 'process_price_f']>group.loc[i-1, 'process_price_f']:
            #     group.loc[i, 'process_price_f']=group.loc[i-1, 'process_price_f']-10

            if group.loc[i, 'process_price_f'] < group.loc[i - 1, 'process_price_f']:
                next = i + 1
                # for next in range(i+1,size):
                #     if group.loc[next, 'flag']!=1 or level_sub not in ['S','A','B']:
                #         break
                if next < size - 1:
                    # interpolate between the neighbours
                    group.loc[i, 'process_price_f'] = group.loc[next, 'process_price_f'] + (
                            group.loc[i - 1, 'process_price_f'] - group.loc[next, 'process_price_f']) / (
                                                              next - i + 1)
                else:
                    group.loc[i, 'process_price_f'] = group.loc[i - 1, 'process_price_f'] * 0.98
    group['diff_rate'] = group['process_price_f'] / group['saleprice'] - 1

    # Pass 3: cap the S-level price relative to A+ (business rule note kept
    # verbatim below; summary: >= 10 S-level sales in 42 days -> cap at
    # A+ * 1.5, otherwise cap at A+ * 1.2)
    if group.loc[0, 'product_level_name'] == 'S':
        """
        1、用模型算出S等级价格，42天内S级出货数量≥10台，S等级上限阈值《A2*1.5；
        2、用模型算出S等级价格，42天内S级出货数量＜10台，S等级上限阈值《A2*1.2；

        2021-10-21 新逻辑修改：
        标准业务变更，取消A2等级，现有逻辑调整为A+
        """
        flag_a2 = group.loc[group.product_level_name == 'A+']
        if not flag_a2.empty:
            a2_inndex = flag_a2.index[0]
        else:
            a2_inndex = 1

        for idex in range(a2_inndex - 1, -1, -1):
            # if group.loc[idex, 'product_level_name'] == 'A+':
            #     group.loc[idex, 'process_price'] = group.loc[a2_inndex, 'process_price'] * 1.01
            # elif
            if group.loc[idex, 'product_level_name'] == 'S':
                rate = group.loc[idex, 'process_price'] / group.loc[a2_inndex, 'process_price']
                if group.loc[idex, 'sale_num'] >= 10 and rate > 1.5:
                    group.loc[idex, 'process_price'] = group.loc[a2_inndex, 'process_price'] * 1.5

                elif group.loc[idex, 'sale_num'] < 10 and rate > 1.2:
                    group.loc[idex, 'process_price'] = group.loc[a2_inndex, 'process_price'] * 1.2

    return group


def predict_price_data(result_df, flag=True):
    """Merge level rates into the sku price frame and repair prices per group.

    When *flag* is True the base price frame is loaded from MySQL, enriched
    with recent period prices and cached as a pickle; otherwise the cached
    pickle is reused.  A per-sku base price is derived from the best-scoring
    A/B/C level, level-rate prices are computed, and every
    (product_id, product_sku_key) group is post-processed by
    ``process_level_rate_price_fun`` — in a process pool of
    ``cpu_worker_num`` workers when there are more than five groups.

    :param result_df: level-rate frame from ``process_level_rate_final``.
    :param flag: True to reload from MySQL and refresh the pickle cache,
        False to reuse the cached pickle.
    :return: concatenated, de-duplicated frame of adjusted prices.
    """
    t1 = time.time()
    if flag:
        price_df = mysql_prediction_processor.load_sql(predict_price_sql)
        # normalise the key columns to int64 before merging
        price_df['product_id'] = price_df.product_id.astype(np.int64)
        price_df['product_level_key'] = price_df.product_level_key.astype(np.int64)
        price_df['product_sku_key'] = price_df.product_sku_key.astype(np.int64)

        price_df[['product_id', 'product_level_key', 'product_sku_key']] = price_df[
            ['product_id', 'product_level_key', 'product_sku_key']].apply(
            np.int64)
        price_df = price_df.merge(level_rank_df[['product_level_id', 'product_level_order_rank']],
                                  left_on=['product_level_key'], right_on=['product_level_id'])
        # fetch sku-level data; price bands are processed over three 7-day windows
        td = format_date_string(get_today())
        period_data = get_period_price_fun(td, flag=True)
        period_data = period_data.loc[period_data.thisprice > 0]
        price_df = price_df.merge(period_data, how="left",
                                  left_on=['product_sku_key', 'product_level_key'],
                                  right_on=['product_sku_key', 'product_level_key'])
        # prefer the period price over avg_sell_price where it exists
        price_df.loc[~pd.isnull(price_df.thisprice) & (price_df.thisprice > 0), 'avg_sell_price'] = price_df.loc[
            ~pd.isnull(price_df.thisprice) & (price_df.thisprice > 0), 'thisprice']
        price_df = price_df.fillna(0)
        # cache the prepared frame so later runs can skip the DB round-trip
        save_pickle_data(FILE_DIR + "predict_price_data.pkl", price_df)
    else:
        price_df = load_pickle_data(FILE_DIR + "predict_price_data.pkl")


    # testing
    # price_df['forecast_reference_price'] = price_df['predict_origin']
    # result_df['product_id'] = result_df.product_id.astype(np.int64)
    # result_df['level_id'] = result_df.level_id.astype(np.int64)
    # price_df=price_df.loc[price_df.product_sku_key == 6889398]
    result_df[['product_id', 'level_id']] = result_df[['product_id', 'level_id']].apply(np.int64)
    price_df = price_df.merge(result_df[['product_id', 'level_id', 'mean_rate']],
                              left_on=['product_id', 'product_level_key'], right_on=['product_id', 'level_id'])

    # score favours better levels (smaller key) and heavily rewards sales
    price_df['score'] = 1000 - price_df.product_level_key + price_df.sale_num * 1000
    data_sku_base_level_df = price_df.loc[price_df.level_sub.isin(['A', 'B', 'C'])]

    # pick the level with the highest score per sku as the base level
    base_level = data_sku_base_level_df.loc[
        data_sku_base_level_df.sort_values(['product_sku', 'score']).drop_duplicates('product_sku', keep='last').index]
    # base price = the base level's price normalised by its rate
    base_level['base_price'] = base_level['avg_sell_price'] / base_level['mean_rate']

    price_df = price_df.merge(base_level[['product_sku', 'base_price']], on='product_sku')
    # clamp S-level predictions into the weekly min/max band
    price_df.loc[price_df.product_level_name.isin(['S']), 'forecast_reference_price'] = price_df.loc[
        price_df.product_level_name.isin(['S'])]. \
        apply(lambda row: s_min_max_range_fun(row['product_level_name'], row['min_week_price'], row['max_week_price'],
                                              row['forecast_reference_price']), axis=1)

    price_df['forecast_reference_price'] = price_df.apply(
        lambda row: sale_num_price_fun(row['sale_num_0_7'], row['price_0_7'], row['thisprice'],
                                       row['forecast_reference_price']), axis=1)  # pull toward the last-7-day sell price

    price_df['level_rate_price'] = price_df.base_price * price_df.mean_rate
    price_df.loc[price_df.product_level_name.isin(['S']), 'level_rate_price'] = price_df.loc[
        price_df.product_level_name.isin(['S'])]. \
        apply(lambda row: s_min_max_range_fun(row['product_level_name'], row['min_week_price'], row['max_week_price'],
                                              row['level_rate_price']), axis=1)  # pull toward the last-7-day sell price: keeps the level rate from distorting the price

    price_df['level_rate_price'] = price_df.apply(
        lambda row: sale_num_price_fun(row['sale_num_0_7'], row['price_0_7'], row['thisprice'],
                                       row['level_rate_price']), axis=1)

    price_df['avg_predict_basep_price'] = price_df[['saleprice', 'forecast_reference_price']].mean(axis=1)

    # relative deviations of the prediction/saleprice/average from the
    # level-rate price; used as repair thresholds downstream
    price_df['predict_level_price_rate'] = abs(
        price_df['forecast_reference_price'] - price_df['level_rate_price']) / price_df.level_rate_price
    price_df['saleprice_level_price_rate'] = abs(
        price_df['saleprice'] - price_df['level_rate_price']) / price_df.level_rate_price
    price_df['mean_price_rate'] = abs(
        price_df['avg_predict_basep_price'] - price_df['level_rate_price']) / price_df.level_rate_price
    price_df['qty'] = price_df['qty'].astype(np.int64)

    price_df['process_price'] = price_df['forecast_reference_price']
    price_df['process_price_f'] = price_df['forecast_reference_price']
    price_df['flag'] = 0
    price_df['diff_rate'] = price_df['saleprice_level_price_rate']

    resDf = pd.DataFrame(columns=price_df.columns.tolist())
    # price_df = price_df.loc[price_df.product_key.isin([58309])] #test
    grouped = price_df.groupby(['product_id', 'product_sku_key'])

    total_gp = grouped.ngroups
    iter=0  # NOTE(review): shadows the builtin 'iter'
    if total_gp>5:
        # fan the groups out over a process pool
        with Pool(cpu_worker_num)  as executor:
            for number, group in zip(grouped,
                                      executor.map(process_level_rate_price_fun,
                                                   grouped,)):
                resDf = resDf.append(group.copy())
                iter += 1
                logger.info('total_gp = {} ,iter={} skuid={} is prime:'.format(total_gp, iter, number[0]))
        logger.info(' inverse_rate_process end use time @{}'.format((time.time() - t1) / 60))
    else:
        # few groups: process sequentially, no pool overhead
        for group in grouped:
            group = process_level_rate_price_fun(group)
            resDf = resDf.append(group.copy())
    resDf = resDf.drop_duplicates()
    logger.info('process predict_price_data use time @{}'.format((time.time() - t1) / 60))
    return resDf


def insert_data_to_incre(date):
    """Copy today's level-rate price rows forward into tomorrow's partition.

    Selects today's rows from ``price_prediction_level2_rate_price_brand_ljc``
    already re-stamped with tomorrow's date, clears any rows stored for
    tomorrow, bulk-inserts the copies and logs the outcome.

    :param date: date string validated via ``check_date_str``; only used for
        computing the log timestamp.
    :return: number of copied rows, 0 when the source partition is empty.
    """
    dt = check_date_str(date) + datetime.timedelta(days=1)
    today = format_date_string(dt)
    query_sql = """select DATE_FORMAT(date_add(curdate(),interval 1 day),'%Y%m%d') as date, product_sku_key, product_sku_name, product_level_key, 
                        product_level_name, product_key, product_name, product_category_id, product_category_name, product_brand_id,
                        product_brand_name, predict_origin, forecast_reference_price,is_new_product,POLY_pred_price,
                        rank,price_3,price_2,price_1,saleprice,qty,mean_rate,score,base_price,min_sale_price as min_sell_price,sale_num,level_rate_price,avg_predict_basep_price,
                        predict_level_price_rate,saleprice_level_price_rate,mean_price_rate,process_price,flag,diff_rate 
                        from price_prediction_level2_rate_price_brand_ljc where date=DATE_FORMAT(date_add(curdate(),interval 0 day),'%Y%m%d') """
    lsql = query_sql
    df_all_data = mysql_prediction_processor.load_sql(lsql)
    if df_all_data.shape[0] == 0:
        # nothing to copy: report the empty source partition and bail out
        logger.info('(通知消息)sku2手机数据插入表数为空! 时间@{}  date={}数据  category_id=1 ，dataSize={} '.format(today, date,
                                                                                                 df_all_data.shape[0]))
        # feishu_messager.send_message(
        #     '(通知消息)sku2手机数据插入表数为空! 时间@{}  date={} category_id@{} ，dataSize={} '.format(today,date, category_id, df_all_data.shape[0]))
        return 0

    # remove anything already written for tomorrow before re-inserting
    mysql_prediction_processor.execute_sql(
        "delete from price_prediction_level2_rate_price_brand_ljc where date=DATE_FORMAT(date_add(curdate(),interval 1 day),'%Y%m%d')")

    # df_all_data['create_date']=today
    df_all_data = df_all_data.fillna(-1)
    # column order mirrors INSERT_LEVEL_RATE_PRICE_SQL
    insert_columns = ['date', 'product_sku_key', 'product_sku_name', 'product_level_key',
                      'product_level_name', 'product_key', 'product_name',
                      'product_category_id', 'product_category_name', 'product_brand_id', 'product_brand_name',
                      'predict_origin', 'forecast_reference_price', 'is_new_product', 'POLY_pred_price',
                      'rank', 'price_3', 'price_2', 'price_1', 'saleprice', 'qty', 'mean_rate', 'score', 'base_price',
                      'min_sell_price', 'sale_num', 'level_rate_price', 'avg_predict_basep_price',
                      'predict_level_price_rate', 'saleprice_level_price_rate', 'mean_price_rate',
                      'process_price', 'flag', 'diff_rate']
    mysql_prediction_processor.execute_insert_sql(
        INSERT_LEVEL_RATE_PRICE_SQL,
        df_all_data[insert_columns].to_records(index=False).tolist())
    logger.info('(通知消息)sku2手机数据等级比率不处理，拷贝数据完成! 时间:{} date={} 数据 category_id=1 ，dataSize={}'.format(today, date,
                                                                                                   df_all_data.shape[
                                                                                                       0]))
    # feishu_messager.send_message('(通知消息)sku2手机数据插入完成! 时间:{} date={} category_id@{} ，dataSize={} '.format(today,date,category_id, df_all_data.shape[0]))
    # save_data_to_gp(df_all_data)
    return df_all_data.shape[0]


"""
TODO
查找当天存入表中的等级比率
"""


def query_price_sku2_level_data():
    """Query today's level-rate price rows, relabelled with tomorrow's date.

    Selects from ``price_prediction_level2_rate_price_brand_ljc`` the rows
    whose ``date`` equals today (``interval 0 day``) while the SELECT emits
    tomorrow's date (``interval 1 day``) as the ``date`` column, so the
    result is ready to be re-inserted for the next day.

    Returns:
        pandas.DataFrame: the queried rows.  (Previously the result was
        loaded and silently discarded; it is now returned so callers can
        actually use or persist it.)
    """
    query_sql = """select DATE_FORMAT(date_add(curdate(),interval 1 day),'%Y%m%d') as date, product_sku_key, product_sku_name, product_level_key, 
                           product_level_name, product_key, product_name, product_category_id, product_category_name, product_brand_id,
                           product_brand_name, predict_origin, forecast_reference_price,is_new_product,POLY_pred_price,
                           rank,price_3,price_2,price_1,saleprice,qty,mean_rate,score,base_price,min_sale_price as min_sell_price,sale_num,level_rate_price,avg_predict_basep_price,
                           predict_level_price_rate,saleprice_level_price_rate,mean_price_rate,process_price,flag,diff_rate 
                           from price_prediction_level2_rate_price_brand_ljc where date=DATE_FORMAT(date_add(curdate(),interval 0 day),'%Y%m%d') """
    df_all_data = mysql_prediction_processor.load_sql(query_sql)
    # save_data_to_gp(df_all_data)  # GP export is currently disabled
    return df_all_data


def main():
    """Entry point for the android sku2 level-rate pipeline.

    Loads (or, when ``flag == 1``, recomputes and caches) the level-rate
    DataFrame, then either runs the full price-prediction pipeline
    (on weekdays 1/3/6, i.e. Tue/Thu/Sun) or rolls the previous day's rows
    forward, and finally reports the processed row count to Feishu.

    Command line:
        argv[1] (optional): date string to process; defaults to today.

    Raises:
        TypeError: wrapping any exception raised during processing
            (chained via ``from`` so the original cause is preserved).
    """
    try:
        t1 = time.time()
        # Local pickle cache of the computed level-rate DataFrame.
        rs_file = 'result_df.pkl'
        # flag == 1 recomputes the level rates; 0 loads the shared cached pickle.
        flag = 0
        if flag == 1:
            result_df = process_level_rate_final()
            save_product_level_rate(result_df)
            # Persist locally, then publish to the shared FILE_DIR copy.
            with open(rs_file, 'wb') as train_data:
                pickle.dump(result_df, train_data)
            check_conflict_file(FILE_DIR, 'result_android_df.pkl')
            save_pickle_data(FILE_DIR + 'result_android_df.pkl', result_df)
        else:
            result_df = load_pickle_data(FILE_DIR + 'result_android_df.pkl')
        logger.info('process process_level_rate_final use time @{}'.format(time.time() - t1))
        t1 = time.time()

        # Optional date override from the command line; otherwise use today.
        if len(sys.argv) > 1:
            date = sys.argv[1]
            today = format_date(date)
        else:
            today = get_today()
        md_date = format_date_string(today)

        # Pickle cache for the price DataFrame derived from the level rates.
        rs_price_file = 'result_price_android_df.pkl'
        mysql_price_num = 0
        # Full pipeline only on Tuesday(1)/Thursday(3)/Sunday(6); on other
        # days just copy the previous day's rows forward (incremental insert).
        if today.weekday() in (1, 3, 6):
            result_df = merge_avg_product_level_rate(result_df)
            df = predict_price_data(result_df, flag=True)
            df.to_csv('/data/sunpengwei/tmp/product_price_level2_rate_config_v1_android.csv', encoding='utf-8-sig')

            with open(rs_price_file, 'wb') as train_data:
                pickle.dump(df, train_data)

            mysql_price_num = save_complement_data(df)
            logger.info('skulevel_v1手机模型执行完成 end！！！')
        else:
            mysql_price_num = insert_data_to_incre(md_date)
        logger.info('process predict_price_data use time @{}'.format(time.time() - t1))
        feishu_messager.send_message('c端sku2_v1 android等级比率处理数据完成，处理完成={}条'.format(mysql_price_num))
        logger.info('等级比率处理数据 {}'.format(mysql_price_num))
        # SystemExit derives from BaseException, so it is not swallowed by
        # the `except Exception` handler below.
        exit(0)
    except Exception as e:
        # logger.exception records the full traceback, not just str(e).
        logger.exception('error {}'.format(e))
        raise TypeError('等级比率报错:') from e


# Script entry point: run the full android level-rate pipeline.
if __name__ == '__main__':
    main()
    # query_price_sku2_level_data()
