#! /usr/bin/env python
# -*- coding: utf-8 -*-
#  Create by Albert_Chen
#  CopyRight (py) 2016年 陈超. All rights reserved by Chao.Chen.
# This script imports CSV data into Elasticsearch.

__author__ = 'Albert'


import csv,codecs,cStringIO

import json
import requests
import datetime
import pandas as pd
from dateutil.parser import parse

REMOTE = 'http://localhost:9200'
API = '/cbi360/'

def upload_csv(csv_path):
    """Read a CSV file and push each row into Elasticsearch, keyed by its CID column.

    :param csv_path: path to a CSV file whose header row contains at least
        a 'CID' column and, optionally, 'CreateDate' / 'LastRTBTime' columns.

    Date columns are normalized to 'YYYY-MM-DD'; missing dates fall back to
    1900-01-01.  NOTE(review): the actual HTTP PUT is commented out below,
    so this function currently only prints each row.
    """
    with open(csv_path, 'r') as csvfile:
        reader = UnicodeReader(csvfile)
        header = reader.next()
        for row in reader:
            data = dict((unicode(k), unicode(v)) for k, v in zip(header, row))
            cid = data['CID']
            # Normalize the two date columns; dateutil's parse() accepts most
            # human-readable date formats.
            data['CreateDate'] = parse(data.get('CreateDate', '1900-01-01 00:00:00')).strftime("%Y-%m-%d")
            data['LastRTBTime'] = parse(data.get('LastRTBTime', '1900-01-01 00:00:00')).strftime("%Y-%m-%d")
            url = REMOTE + API + str(cid) + '?pretty'
            try:
                print(data)
                # resp = requests.request("PUT", url, data=json.dumps(data))
                # if resp.status_code in (200, 201):
                #     print("%s success " % cid)
                # else:
                #     print("%s fail " % cid)
            except Exception as e:  # best effort: report the bad row and keep going
                print(e)
                continue

def upload_item(data, id_name, type_name):
    """PUT a single document into Elasticsearch.

    :param data: dict of field -> value for one document; should contain id_name.
    :param id_name: key in `data` whose value becomes the document id.
    :param type_name: ES document type, appended to the index URL.

    Network errors are printed and swallowed (best-effort bulk load); a 400
    response dumps the payload and the server's explanation for debugging.
    """
    item_id = data.get(id_name)
    url = REMOTE + API + type_name + '/' + str(item_id) + '?pretty'
    # Use a separate name instead of rebinding the `data` parameter, so the
    # original dict stays available for debugging.
    body = json.dumps(data, default=json_serial)
    try:
        resp = requests.put(url, body)
    except Exception as e:  # connection/timeout errors: report and move on
        print(e)
    else:
        if resp.status_code == 400:
            # Bad request: show what we sent and what ES complained about.
            print(body)
            print(resp.content)

def upload_df_data(filepath, id_name, type_name):
    """Load a spreadsheet (.xlsx/.xls/.csv) and upload every row as an ES document.

    :param filepath: path to the input file; format is chosen by extension.
    :param id_name: column whose value becomes each document's id.
    :param type_name: ES document type, passed through to upload_item().
    """
    # BUG FIX: the original tested endswith('xls') (no dot), which would also
    # match names like 'foo.xxls'.  endswith accepts a tuple of suffixes.
    if filepath.endswith(('.xlsx', '.xls')):
        df = pd.read_excel(filepath)
    elif filepath.endswith('.csv'):
        df = pd.read_csv(filepath)
    else:
        print('[Err] No support format file input, please input csv or excel file')
        return
    for i in range(len(df)):
        # .iloc is positional; df.ix is deprecated/removed in modern pandas and
        # is equivalent here because read_* produce a default RangeIndex.
        data = df.iloc[i].to_dict()
        upload_item(data, id_name, type_name)

class UTF8Recoder:
    """Wrap a byte stream so iteration yields UTF-8 encoded lines.

    Input bytes are decoded from `encoding` and re-encoded as UTF-8, which
    is the only encoding Python 2's csv module handles reliably.
    """

    def __init__(self, f, encoding):
        self.reader = codecs.getreader(encoding)(f)

    def __iter__(self):
        return self

    def next(self):
        """Return the next line, re-encoded to UTF-8 bytes."""
        line = self.reader.next()
        return line.encode("utf-8")

class UnicodeReader:
    """csv.reader drop-in that yields rows of unicode strings.

    The stream is first re-encoded to UTF-8 via UTF8Recoder so the
    bytes-only Python 2 csv module can parse it; each parsed cell is then
    decoded back to unicode.  The default "utf-8-sig" encoding strips a
    leading BOM if one is present.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8-sig", **kwds):
        recoded = UTF8Recoder(f, encoding)
        self.reader = csv.reader(recoded, dialect=dialect, **kwds)

    def __iter__(self):
        return self

    def next(self):
        """Return the next row as a list of unicode strings."""
        return [unicode(cell, "utf-8") for cell in self.reader.next()]

class UnicodeWriter:
    """csv.writer drop-in that accepts rows of unicode strings.

    Rows are written UTF-8 encoded into an in-memory queue (Python 2's csv
    module handles bytes only), then decoded and re-encoded with the target
    codec before being flushed to the real output stream.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8-sig", **kwds):
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        """Encode one row of unicode cells and write it to the stream."""
        encoded_cells = [cell.encode("utf-8") for cell in row]
        self.writer.writerow(encoded_cells)
        buffered = self.queue.getvalue()
        # Round-trip through unicode so the incremental encoder can emit the
        # target encoding (including a BOM on the first write for utf-8-sig).
        self.stream.write(self.encoder.encode(buffered.decode("utf-8")))
        self.queue.truncate(0)

    def writerows(self, rows):
        """Write every row in `rows` via writerow()."""
        for row in rows:
            self.writerow(row)


def json_serial(obj):
    """JSON serializer for objects not serializable by default json code.

    Intended as the ``default=`` hook for json.dumps.  datetime.datetime and
    datetime.date values are serialized as ISO-8601 strings; anything else
    raises TypeError, as json.dumps expects from a default hook.
    """
    # datetime.datetime is a subclass of datetime.date, so this single check
    # covers both; isoformat() yields the ISO-8601 text either way.
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    raise TypeError("Type not serializable")

class DateTimeEncoder(json.JSONEncoder):
    """json.JSONEncoder subclass that serializes datetimes as ISO-8601 strings."""

    def default(self, o):
        # BUG FIX: the original tested isinstance(o, datetime), but `datetime`
        # is the *module* here (this file does `import datetime`), so every
        # call raised "isinstance() arg 2 must be a type".  The intended
        # check is against the datetime.datetime class.
        if isinstance(o, datetime.datetime):
            return o.isoformat()
        # Delegate everything else to the base class, which raises TypeError.
        return json.JSONEncoder.default(self, o)

if __name__ == "__main__":

    # Entry point: load the 2017_2 article spreadsheet and upload each row
    # to Elasticsearch, using the ArticleID column as the document id.
    upload_df_data('2017_2_data.xlsx', id_name='ArticleID',type_name='article')