import csv

import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestRegressor
class mydataset():
    """Load and join the Tianchi mum-baby trade-history and baby-info CSVs.

    On construction this:
      * inner-joins the two CSVs on ``user_id``;
      * derives each baby's age in days at purchase time into a ``live`` column;
      * drops orders where the purchase is more than 7 years after birth;
      * explodes the semicolon-separated ``property`` column into one row per
        property value (new column ``p``) and drops rows with any NaN;
      * collects the distinct property keys into ``self.pset``.
    """

    def __init__(self):
        # Load the two raw CSV exports.
        self.first = pd.read_csv('(sample)sam_tianchi_mum_baby_trade_history.csv')
        self.second = pd.read_csv('(sample)sam_tianchi_mum_baby.csv')

        # Deduplication — kept disabled, as in the original version.
        # self.first.drop_duplicates(subset='user_id',inplace=True)
        # self.second.drop_duplicates(subset='user_id', inplace=True)

        # Join purchase records with baby info on user_id.
        self.result = pd.merge(self.first, self.second, how='inner', on=['user_id'])

        # Compute each baby's age in days at purchase time into 'live'.
        self.result['day'] = self.result['day'].astype('string')
        self.result['birthday'] = self.result['birthday'].astype('string')
        self.result['live'] = pd.to_datetime(self.result['day']) - pd.to_datetime(self.result['birthday'])
        # The timedelta renders as "N days ..."; keep only the integer day count.
        self.result['live'] = self.result['live'].astype('str')
        self.result['live'] = self.result['live'].map(lambda x: x.split(" ")[0])
        self.result['live'] = self.result['live'].astype('int')

        # Observed min/max gaps are -762 and 10326 days. A 28-year-old "baby"
        # is implausible (likely a grandparent's purchase); -762 looks like a
        # pre-birth purchase, which is plausible. Babies are usually weaned by
        # age 5-6, so drop orders where the gap exceeds 7 years.
        too_old = self.result[self.result.live > 365 * 7].index
        self.result.drop(too_old, inplace=True)

        # Explode the semicolon-separated 'property' column: one row per
        # property value, stored in a new column 'p'. Exploded rows keep the
        # original row's index (duplicated).
        self.result = self.result.drop('property', axis=1).join(
            self.result['property'].str.split(';', expand=True)
            .stack().reset_index(level=1, drop=True).rename('p'))
        # BUG FIX: dropna() returns a new frame; the original call discarded
        # the result, so NaN rows were never actually removed. Reassign.
        self.result = self.result.dropna(axis=0, how='any')
        print(self.result.columns)

        # Collect the distinct property keys (the part before ':').
        plist = self.result['p'].tolist()
        self.pset = set()
        for item in plist:
            if isinstance(item, str):
                self.pset.add(item.split(":")[0])

    def getData(self):
        """Return the cleaned, joined DataFrame."""
        return self.result

    def onehot(self):
        """One-hot encode the categorical columns against the 'live' target.

        Returns:
            (x, y): ``x`` is a 0/1 DataFrame of dummies for user_id, cat_id,
            cat1, gender and p, with rows sharing an index (produced by the
            property explode) merged together; ``y`` is the matching 'live'
            series summed over the same duplicated index.
            NOTE(review): summing 'live' over exploded duplicates scales the
            target by the number of properties per order — confirm intended.
        """
        data = self.result
        x = pd.DataFrame()

        for col in ['user_id', 'cat_id', 'cat1', 'gender', 'p']:
            t = pd.get_dummies(data[col], prefix=col)
            x = pd.concat((x, t), axis=1)
        # Rows exploded from the same original order share an index; merge them.
        x = x.groupby(by=x.index, level=None).sum()
        # Clamp the summed dummy counts back to a 0/1 indicator.
        x[x != 0] = 1
        y = data['live']
        y = y.groupby(by=y.index, level=None).sum()

        return (x, y)


