# -*- coding: utf-8 -*-
from PIL import Image
from django.http import JsonResponse, HttpResponse
from QipaoProject.crawlers import crawlers as cl
import json

# Perform the crawl operation (Django view).
def _allocate(total, flags):
    """Split *total* images across the enabled sources (baidu, sougou, 360).

    Each enabled source gets ``total // n_enabled`` images and the LAST
    enabled source absorbs the remainder, so the counts always sum to
    *total* (this matches the original nested-if distribution exactly).

    Args:
        total: total number of images requested (int >= 0).
        flags: list of three bools [baidu, sougou, 360].

    Returns:
        list of three ints: [num_baidu, num_sougou, num_360].
        All zeros when no source is enabled.
    """
    counts = [0, 0, 0]
    enabled = [i for i, on in enumerate(flags) if on]
    if not enabled:
        return counts
    per = total // len(enabled)
    for i in enabled[:-1]:
        counts[i] = per
        total -= per
    counts[enabled[-1]] = total  # last enabled source takes the remainder
    return counts


def docrawler(request):
    """Django view: launch the multi-threaded image crawlers.

    POST parameters:
        keyword: 'qipao' -> search term '旗袍'; anything else -> '上衣'.
        baidu / sougou / 360: the string 'true' enables that source.
        num: total number of images to fetch across all enabled sources.

    Returns:
        HttpResponse echoing the requested ``num``, or a 400 response
        when the input is invalid (previously these cases crashed with
        TypeError / ZeroDivisionError and surfaced as a 500).
    """
    if request.POST.get("keyword") == 'qipao':
        keyword = '旗袍'
    else:
        keyword = '上衣'

    flags = [request.POST.get(name) == 'true'
             for name in ("baidu", "sougou", "360")]

    try:
        num = int(request.POST.get("num"))
    except (TypeError, ValueError):
        # Missing or non-numeric 'num' used to raise and return a 500.
        return HttpResponse('invalid num', status=400)

    if not any(flags):
        # Original code computed num // 0 here -> ZeroDivisionError.
        return HttpResponse('no data source selected', status=400)

    num_baidu, num_sougou, num_360 = _allocate(num, flags)

    save_path = 'static/images/unlabel'

    # Kick off the multi-threaded crawlers; the returned elapsed time
    # was never used by the original code, so it is not captured.
    cl.doCrawler(keyword, num_baidu, num_sougou, num_360, save_path)

    return HttpResponse(str(request.POST.get("num")))
