import datetime
import json
import logging
import os
import shutil
import requests
import pandas as pd

from django.http import HttpResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils import timezone
from psqlextra.query import ConflictAction
from django.core.serializers import serialize

from data.documents import MydataDocument
from m_baoke.celery import app
from .models import MyData, UploadHistory
from m_baoke import settings
from .forms import UploadFileForm
from data import utils


logger = logging.getLogger('django')


# Update data: accept a file upload and schedule the import task.
def update_data(request):
    """Accept a (possibly chunked) Fine-Uploader-style file upload.

    On POST: validates the upload form, stores the file — deduplicating by
    file name against ``UploadHistory`` — and enqueues the ``read_data``
    Celery task to import the spreadsheet.  Responds with a JSON body
    carrying a ``success`` flag, as the uploader widget expects.
    On any other method: renders the upload page.
    """
    if request.method == 'POST':
        content = json.dumps({'success': True})
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            cleaned_data = form.cleaned_data
            # Original file name / uploader-assigned uuid.
            file_name = cleaned_data['qqfilename']
            qq_uuid = cleaned_data['qquuid']

            # A file with this name was uploaded before: reuse its stored
            # uuid instead of saving the payload again.  (.first() does one
            # query, unlike count() + two [0] indexings.)
            existing = UploadHistory.objects.filter(file_name=file_name).first()
            if existing is not None:
                file_name = existing.file_name
                qq_uuid = existing.qq_uuid
            else:
                handle_upload(request.FILES['qqfile'], cleaned_data)
                UploadHistory(qq_uuid=qq_uuid, file_name=file_name).save()

            # Read the spreadsheet and write it to the database (async).
            read_data.apply_async((qq_uuid, file_name))
        else:
            content = json.dumps({
                'success': False,
                'error': '%s' % repr(form.errors)
            })
        return HttpResponse(content)

    return render(request, 'data/update_data.html')


# Handle an upload: persist one chunk or a whole file.
def handle_upload(f, file_attrs):
    """Handle a chunked or non-chunked upload.

    Whole files are saved to ``UPLOAD_DIRECTORY/<uuid>/<name>``.  Chunked
    parts are stashed under ``CHUNKS_DIRECTORY/<uuid>/<name>/<part>`` and
    combined into the final destination once the last part arrives.

    Args:
        f: the uploaded file object (one chunk, or the whole file).
        file_attrs: cleaned form data with the qq* upload metadata.
    """
    logger.info(file_attrs)

    # Normalize numeric metadata once: the form may deliver these as
    # strings (the original converted qqtotalparts with int() in one place
    # but did raw arithmetic on it in another, which raises TypeError).
    total_parts = int(file_attrs['qqtotalparts']) if file_attrs.get('qqtotalparts') else 0
    chunked = total_parts > 1

    dst_folder = os.path.join(settings.UPLOAD_DIRECTORY, file_attrs['qquuid'])
    dst = os.path.join(dst_folder, file_attrs['qqfilename'])

    # Chunked: redirect this part into the per-file chunk directory.
    if chunked:
        part_index = int(file_attrs['qqpartindex'])
        dst_folder = os.path.join(settings.CHUNKS_DIRECTORY, file_attrs['qquuid'])
        dst = os.path.join(dst_folder, file_attrs['qqfilename'], str(part_index))
        logger.info('Chunked upload received')

    utils.save_upload(f, dst)
    logger.info('Upload saved: %s', dst)

    # If the last chunk has been sent, combine the parts.
    if chunked and part_index == total_parts - 1:
        logger.info('Combining chunks: %s', os.path.dirname(dst))
        utils.combine_chunks(total_parts,
                             file_attrs['qqtotalfilesize'],
                             source_folder=os.path.dirname(dst),
                             dest=os.path.join(settings.UPLOAD_DIRECTORY,
                                               file_attrs['qquuid'],
                                               file_attrs['qqfilename']))
        logger.info('Combined: %s', dst)

        # Drop the now-merged per-file chunk tree.
        shutil.rmtree(os.path.dirname(os.path.dirname(dst)))


# Read spreadsheet data with pandas and upsert it into the database.
def _row_to_record(row):
    """Translate one spreadsheet row (a pandas Series) into a MyData field dict."""
    end = pd.to_datetime(row['优惠券结束时间'], format='%Y-%m-%d')
    return {
        'goods_id': str(row['商品id']),
        'goods_name': row['商品名称'],
        'goods_image': row['商品主图'],
        'goods_detail_url': row['商品详情页链接地址'],
        'goods_category': row['商品一级类目'],
        'tbk_url': row['淘宝客链接'],
        'goods_price': row['商品价格(单位：元)'],
        'goods_sales': row['商品月销量'],
        'income_ratio': row['收入比率(%)'],
        'commission': row['佣金'],
        'seller_wangwang': row['卖家旺旺'],
        'seller_id': str(row['卖家id']),
        'shop_name': row['店铺名称'],
        'platform_type': row['平台类型'],
        'coupon_id': row['优惠券id'],
        'coupon_total': row['优惠券总量'],
        'coupon_surplus': row['优惠券剩余量'],
        'coupon_value': row['优惠券面额'],
        'discount_start': pd.to_datetime(row['优惠券开始时间'], format='%Y-%m-%d'),
        # The coupon end date is inclusive: extend to the last second of the day.
        'discount_end': end + datetime.timedelta(hours=23, minutes=59, seconds=59),
        'coupon_url': row['优惠券链接'],
        'goods_coupon_spread_url': row['商品优惠券推广链接'],
    }


@app.task
def read_data(qq_uuid, qq_file):
    """Read an uploaded .xls/.xlsx file and upsert its rows into MyData.

    Args:
        qq_uuid: upload uuid — names the folder under UPLOAD_DIRECTORY.
        qq_file: original file name inside that folder.

    Expired coupon rows are purged first; remaining rows are upserted
    keyed on ``goods_id``.  Non-Excel files are silently ignored.
    """
    dst = os.path.join(settings.UPLOAD_DIRECTORY, qq_uuid, qq_file)
    if not dst.endswith(('.xlsx', '.xls')):
        return

    df = pd.read_excel(dst)
    logger.info('De-duplicating ...')
    # NOTE(review): keep=False discards EVERY row whose 商品id appears more
    # than once (no copy is retained) — confirm keep='first' was not intended.
    df.drop_duplicates(['商品id'], keep=False, inplace=True)

    logger.info('Building records ...')
    records = [_row_to_record(row) for _, row in df.iterrows()]

    logger.info('Deleting expired coupon records ...')
    MyData.objects.filter(discount_end__lte=timezone.now()).delete()

    # Upsert: rows conflicting on goods_id are updated, the rest inserted.
    logger.info('Updating database ...')
    MyData.objects.on_conflict(['goods_id'], ConflictAction.UPDATE).bulk_insert(records)


# Download coupon data from the Alimama export endpoint.
def download(request):
    """On POST, fetch the Alimama coupon export feed; always answer with 'ok;'.

    NOTE(review): the fetched payload is currently discarded — only its HTTP
    status is logged.  Presumably a later step will persist it; confirm.
    """
    if request.method == 'POST':
        res = requests.get('https://pub.alimama.com/coupon/qq/export.json?adzoneId=109896900148&siteId=1203700009')
        logger.info('Alimama export fetched: status=%s', res.status_code)

    # Bug fix: the original passed the ``request`` object as the response
    # body and a dict as ``content_type``.  Return the message as JSON.
    return HttpResponse(json.dumps({'msg': 'ok;'}), content_type='application/json')


# Full-text search against the Elasticsearch index.
def search(request):
    """On POST, match ``words`` against goods_name and return rendered result rows.

    On any other method, render the search page.  A missing ``words`` field
    yields an empty-keyword search instead of a 500.
    """
    if request.method == 'POST':
        keywords = request.POST.get('words', '')
        hits = MydataDocument.search().query("match", goods_name=keywords)
        qs = hits.to_queryset()
        html = render_to_string('data/partial_search.html', {'goods': qs})
        # Bug fix: 'application/html' is not a registered MIME type;
        # the correct type for an HTML fragment is 'text/html'.
        return HttpResponse(html, content_type='text/html')

    return render(request, 'data/search.html')
