# -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 10:13:44 2018

@author:Devin

"""
import copy
import logging
import logging.handlers
import os
import random
import traceback
from datetime import datetime

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sqlalchemy as sa
from sklearn import linear_model
from sklearn import metrics
from sklearn import preprocessing
from sklearn.externals import joblib
from sklearn.linear_model import ARDRegression
from sklearn.linear_model import BayesianRidge
from sklearn.linear_model import ElasticNetCV
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Ridge
from sklearn.linear_model import RidgeCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures


class pd_batchdata():
    '''Read camera-track batch counts from a MySQL table, train one
    ARDRegression model per shop and predict the next batch count.

    Typical flow: ``start()`` loads rows from ``tbname``, cleans them into
    per-shop feature frames, trains/loads models, and returns a dict
    ``{shop_id: predicted_count}`` (or random fallbacks when data or the
    DB connection is unavailable).

    SECURITY NOTE(review): the original docstring embedded live DB
    credentials; they were removed here — keep secrets in configuration,
    never in source.
    '''

    def __init__(self, shop_list=None, dbpare='mysql', name='jdbc', passwd='mysql',
                 host='10.255.131.78:13306', dbname='mbAppMysql', tbname='DWD_CAM_TRACK_RST',
                 start_time="2017-03-20 08:10",
                 end_time=None):
        """Store connection/query parameters.

        Fixes vs. original: the mutable list default for ``shop_list``
        (shared across calls) and ``end_time=str(datetime.now())`` (which
        was evaluated once at import time, so every instance got the
        module-load timestamp) are both replaced with None sentinels.
        """
        if shop_list is None:
            shop_list = ['SQ2106', 'SQ2110', 'SQ2123', 'SQ2124', 'SQ2128']
        if end_time is None:
            end_time = str(datetime.now())
        self.shop_list = shop_list
        self.dbpare = dbpare            # SQLAlchemy dialect, e.g. 'mysql'
        self.name = name                # DB user
        self.passwd = passwd            # DB password
        self.host = host                # 'host:port'
        self.dbname = dbname
        self.tbname = tbname
        self.start_time = start_time    # inclusive lower bound for REC_UPD_DATE
        self.end_time = end_time        # upper bound for REC_UPD_DATE
        self.weather_date = 0           # placeholder input for get_weather (weather test)
        self.all_col = ['CAM_TRACK_RST_ID', 'SHOP_ID', 'CAM_ID', 'BATCH_COUNT', 'RECEPT_COUNT',
                        'REC_UPD_DATE']
        self.use_col = ['weather', 'weekday', 'SHOP_ID']   # model feature columns
        self.max_min = ['BATCH_COUNT']                     # columns scaled to [0, 1]

    def _random_result(self, upper):
        """Fallback prediction: a random count in [10, upper] per shop.

        Generalizes the original hard-coded 5-entry dicts so any
        ``shop_list`` length works (the original indexed shop_list[0..4]
        and crashed for shorter lists).
        """
        return {shop: random.randint(10, upper) for shop in self.shop_list}

    def _log_traceback(self):
        """Append the current traceback to ``self.log`` if configured.

        Fix: the original opened ``self.log`` unconditionally inside
        except handlers, but ``self.log`` is never set (its assignment in
        __init__ is disabled), so error handling itself raised
        AttributeError.  Fall back to stderr when no log path exists.
        """
        log_path = getattr(self, 'log', None)
        if log_path:
            with open(log_path, 'a') as f:
                traceback.print_exc(file=f)
        else:
            traceback.print_exc()

    def start(self):
        """Entry point: load data, then predict with a saved model, train
        new per-shop models, or fall back to random predictions.

        Returns a ``{shop_id: count}`` dict on the fallback/predict paths,
        or None after a training run (matching the original flow).
        """
        try:
            # NOTE(review): the DB connection was disabled in the original
            # (left commented out), so self.conn is never created and
            # load_df() below always fails into the random fallback.
            # Re-enable with:
            # self.conn = sa.create_engine('{0}://{1}:{2}@{3}/{4}'.format(
            #     self.dbpare, self.name, self.passwd, self.host,
            #     self.dbname)).connect()
            pass
        except Exception:
            self._log_traceback()
        try:
            self.df = self.load_df()
            self.df = pd.DataFrame()  # TEST stub kept from original: forces fallback path
        except Exception:
            # Table unreadable: answer with random counts so callers always
            # get something usable.
            return self._random_result(300)

        if self.df.shape[0] == 0:
            # No rows in the window: same random fallback.
            return self._random_result(300)

        # NOTE(review): this checks/loads 'ardRegression.pkl' while model()
        # saves 'ardRegression_<shop>.pkl' — confirm the intended file name.
        if os.path.exists('ardRegression.pkl'):
            result = {}
            for shopx in self.shop_list:
                dfs = self.selct_shop(shopx, self.end_time)
                result[shopx] = self.load_result(dfs.iloc[-1], 'ardRegression.pkl')
            # Fix: the original returned inside the loop, so only the first
            # shop was ever predicted.
            return result

        if not os.path.exists('ardRegression_new.pkl'):
            try:
                # Fix: the original called clean_data() twice in a row and
                # discarded the first result; once is enough.
                self.cdf = self.clean_data()
                if self.cdf == 0:
                    # clean_data signals "too little data" with 0.
                    print('not enough data to build a model!')
                else:
                    self.tb_list = self.cdf[0]
                    self.all_df = self.cdf[1]
                    try:
                        self.model()
                    except Exception:
                        # Training failed: wider random fallback range,
                        # as in the original.
                        return self._random_result(450)
            except Exception:
                self._log_traceback()

    def selct_shop(self, shop_id, time):
        """Return all rows of ``tbname`` for one shop up to ``time``."""
        # Fix: SQL equality is '=', not '=='.
        # SECURITY(review): values are interpolated into the SQL string;
        # prefer a parameterized query if inputs can be untrusted.
        # NOTE(review): MySQL quotes identifiers with backticks, not
        # double quotes — confirm the server's ANSI_QUOTES mode.
        return pd.read_sql("""
                             SELECT * FROM {0} 
                             WHERE "REC_UPD_DATE" <= '{1}' AND "SHOP_ID" = '{2}'
                             """.format(self.tbname, time, shop_id), self.conn)

    def _log_print(self):  # log handler
        """Prepare a rotating file handler for ``self.log``.

        NOTE(review): as in the original, the handler is built but never
        attached to a logger (addHandler was commented out), so this is
        currently inert.
        """
        handler = logging.handlers.RotatingFileHandler(self.log, mode='w',
                                                       encoding='utf-8')  # backupCount=5, maxBytes=1024 * 1024
        fmt = '%(asctime)s - %(levelname)s - %(message)s'
        formatter = logging.Formatter(fmt)
        handler.setFormatter(formatter)

    def _dir_path(self):
        """Create the log directory if needed and return the log file path.

        NOTE(review): relies on self.logpath / self.log_name, whose
        assignments in __init__ are commented out — set them before
        calling this.
        """
        cache_root_dir = os.path.join(self.logpath, 'log2batch_predict')
        if not os.path.exists(cache_root_dir):
            os.makedirs(cache_root_dir)
        return os.path.join(cache_root_dir, self.log_name)

    def load_df(self):
        """Return rows with start_time < REC_UPD_DATE <= end_time."""
        # SECURITY(review): same string-built SQL caveat as selct_shop().
        return pd.read_sql("""
                             SELECT * FROM {0} 
                             WHERE "REC_UPD_DATE" > '{1}' AND "REC_UPD_DATE" <= '{2}'
                             """.format(self.tbname, self.start_time, self.end_time), self.conn)

    def get_weather(self, time_list):
        """Stub weather lookup.

        Returns a frame indexed by the given timestamps with a constant
        'weather' column of 0 (a real weather source is not implemented).
        """
        wdf = pd.DataFrame()
        wdf['date'] = time_list
        wdf.index = pd.to_datetime(wdf.date)
        wdf['weather'] = 0
        return wdf

    def clean_data(self):
        """Build per-shop feature frames from ``self.df``.

        Returns ``[tb_list, all_df]`` (one frame per shop plus their
        concatenation) when at least 60 rows are available, else 0.
        """
        try:
            self.wdf = self.get_weather(self.weather_date)
        except Exception:
            self._log_traceback()
        if self.df.shape[0] >= 60:
            tb_df = copy.deepcopy(self.df)
            tb_df.index = pd.to_datetime(tb_df.REC_UPD_DATE)
            tb_df = tb_df.sort_index()
            tb_df['weekday'] = ['wk_' + str(x) for x in tb_df.index.weekday]
            wdf = self.get_weather(self.df.REC_UPD_DATE)
            # Fix: the original compared a DataFrame to 60 (raises
            # ValueError); the intent appears to be a row-count check.
            if wdf.shape[0] >= 60:
                # Fix: the original looked weather up in a copy of self.df,
                # which has no 'weather' column; use the weather frame.
                # TODO(review): confirm against real data (duplicate
                # timestamps would make wdf.loc[x] return multiple rows).
                tb_df['weather'] = [wdf.loc[x].weather for x in tb_df.index]
            # NOTE(review): if the weather branch is skipped, 'weather' is
            # missing and the use_col selection below raises, as in the
            # original — confirm intended behavior.
            min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
            shops = list(set(tb_df.SHOP_ID))
            tb_list = []
            all_df = pd.DataFrame()
            for shop in shops:
                # Keep only this shop's rows with non-negative counts.
                temp = tb_df.loc[(tb_df['SHOP_ID'] == shop) & (tb_df['BATCH_COUNT'] >= 0), :]
                batch_count = temp.BATCH_COUNT
                # One-hot encode the categorical features and min-max scale
                # the numeric ones.
                temp = pd.get_dummies(pd.concat([temp.loc[:, self.use_col],
                                                 pd.DataFrame(min_max_scaler.fit_transform(temp.loc[:, self.max_min]),
                                                              index=temp.index, columns=self.max_min)], axis=1))
                # Fix: the original wrote these as one comma-joined
                # statement, which Python parses as a tuple/chained
                # assignment, not two statements.
                temp['batch_count'] = batch_count
                temp = temp.dropna(axis=1)
                tb_list.append(temp.sort_index())
                all_df = pd.concat([all_df, temp.sort_index()])
            return [tb_list, all_df]
        else:
            # Not enough history to build a model.
            return 0

    def model(self):
        """Train one ARDRegression per shop and dump each model to
        'ardRegression_<shop>.pkl'.

        Features at time t predict batch_count at t+1 (x/y shifted by one
        row).
        """
        print("all_df's shape is %s" % str(self.all_df.shape))
        for item in self.tb_list:
            if item.shape[0] <= 30:
                # Too little history for this shop.
                continue
            if 'SHOP_ID' in item.columns:
                ids = str(item.SHOP_ID.iloc[0])
            else:
                # NOTE(review): get_dummies in clean_data() one-hot encodes
                # SHOP_ID, so the plain column may not exist here; recover
                # the shop id from the dummy column name.
                dummies = [c for c in item.columns if c.startswith('SHOP_ID_')]
                ids = dummies[0][len('SHOP_ID_'):] if dummies else 'unknown'
            train = item.iloc[:-10]
            test = item.iloc[-10:]
            # Fix: the original used drop(..., inplace=True), whose return
            # value is None, so trainx/testx were None and fit() crashed.
            # errors='ignore' tolerates the already-encoded SHOP_ID column.
            trainx = train.iloc[:-1].drop(['batch_count', 'SHOP_ID'], axis=1, errors='ignore')
            testx = test.iloc[:-1].drop(['batch_count', 'SHOP_ID'], axis=1, errors='ignore')
            trainy = train.iloc[1:].batch_count
            testy = test.iloc[1:].batch_count
            ardRegression = ARDRegression()
            ardRegression.fit(trainx, trainy.values.ravel())
            print("最优的alpha值: ", ardRegression.alpha_)
            print("RMSE:", np.sqrt(metrics.mean_squared_error(testy, ardRegression.predict(testx))))
            print('the mean sqare error:%.2f' % np.mean(abs(ardRegression.predict(testx) - testy)))
            joblib.dump(ardRegression, 'ardRegression_%s.pkl' % (ids))

    def load_result(self, onedf, model_paths):
        """Predict the next batch count for one row using a saved model.

        ``onedf`` may be a one-row DataFrame or a Series (as produced by
        ``dfs.iloc[-1]`` in start()); ``model_paths`` is a joblib pickle
        path.
        """
        # Fix: the original applied DataFrame-only operations (.loc[:, cols])
        # to the Series passed from start(); promote a Series to a one-row
        # frame first.
        temp = onedf.to_frame().T if isinstance(onedf, pd.Series) else onedf
        min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
        temp = pd.get_dummies(pd.concat([temp.loc[:, self.use_col],
                                         pd.DataFrame(min_max_scaler.fit_transform(temp.loc[:, self.max_min]),
                                                      index=temp.index, columns=self.max_min)], axis=1))
        # Fix: the original assigned an undefined name ``batch_count`` here
        # (tuple-assignment typo copied from clean_data); the target column
        # is not needed for prediction, so only dropna remains.
        temp = temp.dropna(axis=1)
        temp = temp.drop(['batch_count', 'SHOP_ID'], axis=1, errors='ignore')
        # NOTE(review): get_dummies on a single row cannot reproduce the
        # full training column set; confirm feature alignment with the
        # saved model before relying on this path.
        models = joblib.load(model_paths)
        return models.predict(temp)



if __name__ == '__main__':
    # Script entry point: build the predictor with its defaults and run
    # the full load/train/predict pipeline once.
    predictor = pd_batchdata()
    prediction = predictor.start()