# -*- coding: utf-8 -*-
# author : li shi jie
# Email : yr785339493@qq.com
from django.http import HttpResponse
from django.shortcuts import render,redirect,reverse
from . import crawl_imge
flag = True
import traceback
pre_dic = {
        'A': 'https://www.ae.com/aerie-bras/aerie/s-cat/4840012?cm=sUS-cUSD&navdetail=mega:cat6610030:c1:p2',
        'B': 'https://www.yse-paris.com/fr/53-e-shop',
        'C': 'https://www.freepeople.com/china/xinpin/',
        'D': 'http://valentinenyc.com/shop/',
        'E': 'http://www.princessetamtam.com/fr/lingerie/sous-vetement-femme?sz=24&start=0&format=page-element&showall=true',
        'F': 'https://www.oysho.cn/itxrest/2/catalog/store/65009628/60361118/category/1498009/product?languageId=-7&appId=1',
        'G': 'https://www.stellamccartney.cn/cn/online/stella-mccartney/%E5%A5%B3%E5%A3%AB/%E9%AB%98%E7%BA%A7%E5%86%85%E8%A1%A3',
        'H': 'https://www.eberjey.com/intimates.html?product_list_limit=all',
        'I': 'https://www.lilipiache.com/collections/all',
        'J': 'https://forloveandlemons.com/collections/lingerie',
        'K': 'https://www.hollisterco.cn/on/demandware.store/Sites-hollister_cn-Site/zh_CN/Search-Start?cgid=6947751&start=0&sz=12&format=ajax',
        'L': 'https://www.une-nana-cool.com/item/inc/loaditem.php',
        'M': 'https://www.katthelabel.com/collections/shop-all',
        'N': 'https://www.mimiholliday.cn/%E6%94%B6%E8%97%8F/%E6%89%80%E6%9C%89%E5%86%85%E8%A1%A3?page=1',
        'O': 'https://sloaneandtate.com/collections/all',
        'P': 'https://www.fleurofengland.com/lingerie/shop-by/sets?p=1',
        'Q': 'https://www.heidiklumintimates.com/productcluster/product-search?GAStoreName=View+Store:+11325&GAVal=Order+By:+rank+Page+1&column=rank&filters=%7B%7D&includeUnavailable=false&includeUncategorized=false&limit=36&offset=0&order=1&q=&storeId=11325',
        'R': 'https://www.agentprovocateur.com/int_en/new-in?p=1',
        'S': 'https://www.thereformation.com/categories/swim?page=1'
    }
def test(request):


    global flag
    try:
        if request.method == 'POST':
            if flag == True:
                flag = False
                keys = request.POST.getlist('coffee')
                if keys:
                    for key in keys:
                        url = pre_dic.get(key)
                        spider = crawl_imge.Spider(url)
                        if 'www.freepeople.com' in url or 'www.lilipiache.com' in url or 'www.katthelabel.com' in url:
                            # 循环页数，先得到页数
                            page_count = spider.parse()
                            if page_count:
                                for page in range(1, page_count + 1):
                                    current_url = url+'?page=%s' % page
                                    print('开始下载%s' % current_url)
                                    res_urls = spider.download_image(current_url)
                                    if res_urls:
                                        spider.run(res_urls)
                                    else:
                                        flag = True
                                        return HttpResponse('超时请重试!')
                                flag = True
                                return HttpResponse('抓取完成')
                            else:
                                flag = True
                                continue
                        elif 'princessetamtam.com' in url:
                            res_urls, res_items = spider.parse()
                            if res_urls:
                                spider.run(res_urls)
                                pn = 1
                                while len(res_items) == 24:
                                    next_url_start = pn*24
                                    next_url = url.replace('start=0', 'start='+str(next_url_start))
                                    spider = crawl_imge.Spider(next_url)
                                    res_urls, res_items = spider.parse()
                                    spider.run(res_urls)
                                    pn += 1
                                flag = True
                                return HttpResponse('抓取完成')
                            else:
                                flag = True
                                return HttpResponse('超时了,重试')
                        elif 'hollisterco.cn' in url:
                            res_urls, res_items = spider.parse()
                            if res_urls:
                                spider.run(res_urls)
                                pn = 1
                                while len(res_items) == 12:
                                    next_url_start = pn*12
                                    next_url = url.replace('start=0', 'start='+str(next_url_start))
                                    spider = crawl_imge.Spider(next_url)
                                    res_urls, res_items = spider.parse()
                                    spider.run(res_urls)
                                    pn += 1
                                flag = True
                                return HttpResponse('抓取完成')
                            else:
                                flag = True
                                return HttpResponse('超时了,重试')
                        # 该网站为post请求，需要带参数
                        elif 'une-nana-cool.com' in url:
                            form_data = {
                                'items': '&qdn=003370201&p=1',
                                'all': '0',
                                'category': '1'
                            }
                            res_urls = spider.parse(form_data)
                            if res_urls:
                                spider.run(res_urls)
                                pn = 2
                                while len(res_urls) == 30:
                                    next_item = '&qdn=003370201&p=' + str(pn)
                                    form_data['items'] = next_item
                                    res_urls = spider.parse(form_data)
                                    spider.run(res_urls)
                                    pn += 1
                                else:
                                    form_data = {
                                        'items': '&qdn=003370202&p=1',
                                        'all': '0',
                                        'category': '2'
                                    }
                                    res_urls = spider.parse(form_data)
                                    if res_urls:
                                        spider.run(res_urls)
                                        pn = 2
                                        while len(res_urls) == 30:
                                            next_item = '&qdn=003370202&p=' + str(pn)
                                            form_data['items'] = next_item
                                            res_urls = spider.parse(form_data)
                                            spider.run(res_urls)
                                            pn += 1
                                flag = True
                                return HttpResponse('抓取完成')
                            else:
                                flag = True
                                return HttpResponse('超时了,重试')
                        elif 'www.mimiholliday.cn' in url:
                            res_urls, res_items = spider.parse()
                            if res_urls:
                                spider.run(res_urls)
                                pn = 2
                                while len(res_items) == 50:
                                    next_url = url.replace('page=1', 'page=' + str(pn))
                                    spider = crawl_imge.Spider(next_url)
                                    res_urls, res_items = spider.parse()
                                    spider.run(res_urls)
                                    pn += 1
                                flag = True
                                return HttpResponse('抓取完成')
                            else:
                                flag = True
                                return HttpResponse('超时了,重试')
                        elif 'sloaneandtate.com' in url:
                            res_urls, res_items = spider.parse()
                            if res_urls:
                                spider.run(res_urls)
                                pn = 2
                                while res_items == 48:
                                    next_url = url + '?page=%s' % pn
                                    spider = crawl_imge.Spider(next_url)
                                    res_urls, res_items = spider.parse()
                                    spider.run(res_urls)
                                    pn += 1
                                flag = True
                                return HttpResponse('抓取完成')
                            else:
                                flag = True
                                return HttpResponse('超时了,重试')
                        elif 'fleurofengland.com' in url:
                            res_urls, res_items = spider.parse()
                            if res_urls:
                                spider.run(res_urls)
                                pn = 2
                                while res_items == 36:
                                    next_url = url.replace('p=1', 'p='+str(pn))
                                    spider = crawl_imge.Spider(next_url)
                                    res_urls, res_items = spider.parse()
                                    spider.run(res_urls)
                                    pn += 1
                                flag = True
                                return HttpResponse('抓取完成')
                            else:
                                flag = True
                                return HttpResponse('超时了,重试')
                        elif 'heidiklumintimates.com' in url:
                            res_urls, res_items = spider.parse()
                            if res_urls:
                                spider.run(res_urls)
                                pn = 2
                                offset = 36
                                while res_items == 36:
                                    next_url = url.replace('Page+1', 'Page+' + str(pn)).replace('offset=0', 'offset='+str(offset))
                                    spider = crawl_imge.Spider(next_url)
                                    res_urls, res_items = spider.parse()
                                    spider.run(res_urls)
                                    pn += 1
                                    offset += 36
                                flag = True
                                return HttpResponse('抓取完成')
                            else:
                                flag = True
                                return HttpResponse('超时了,重试')
                        elif 'agentprovocateur.com' in url:
                            res_urls, res_items = spider.parse()
                            if res_urls:
                                spider.run(res_urls)
                                pn = 2
                                while res_items == 100:
                                    next_url = url.replace('p=1', 'p='+str(pn))
                                    spider = crawl_imge.Spider(next_url)
                                    res_urls, res_items = spider.parse()
                                    spider.run(res_urls)
                                    pn += 1
                                flag = True
                                return HttpResponse('抓取完成')
                        elif 'thereformation.com' in url:
                            res_urls, res_items = spider.parse()
                            if res_urls:
                                spider.run(res_urls)
                                pn = 2
                                while res_items == 20:
                                    next_url = url.replace('page=1', 'page='+str(pn))
                                    spider = crawl_imge.Spider(next_url)
                                    res_urls, res_items = spider.parse()
                                    spider.run(res_urls)
                                    pn += 1
                                flag = True
                                return HttpResponse('抓取完成')
                        print('正在抓取%s' % url)
                        res_urls = spider.parse()  # 返回的图片的链接,如果有图片链接，否则就返回请求超时
                        if res_urls:
                            spider.run(res_urls)
                        else:
                            flag = True
                            return HttpResponse('%s网站请求超时' % url)
                    flag = True
                    return HttpResponse('抓取完成')
                else:
                    flag = True
                    return HttpResponse('请选择要抓取的网站')
            else:
                flag = True
                return HttpResponse('正在抓取.....')
        else:
            return render(request, 'start.html')
    except Exception as e:
        flag = True
        traceback.print_exc()
        return HttpResponse('网站还没有收录进去')

def world(request):

    return redirect('test')

def hello(request):
    return render(request, 'index.html')

# def strart_crawl(request):
#     # urls = [
#         #     'https://www.ae.com/aerie-bras/aerie/s-cat/4840012?cm=sUS-cUSD&navdetail=mega:cat6610030:c1:p2',
#     #     'https://www.yse-paris.com/fr/53-e-shop',
#     # ]
#     global flag
#     try:
#         if request.method == 'POST':
#             if flag == True:
#                 flag = False
#                 url = request.POST.get('urls')
#                 if 'www.freepeople.com' in url:
#                     spider = crawl_imge.Spider(url)
#                     page_count = spider.parse()
#                     if page_count:
#                         for page in range(1, page_count + 1):
#                             current_url = url+'?page=%s' % page
#                             print('开始下载%s' % current_url)
#                             res_urls = spider.download_image(current_url)
#                             spider.run(res_urls)
#                         flag = True
#                         return HttpResponse('抓取完成')
#                     else:
#                         flag = True
#                         return HttpResponse('网站输入错误,请重新输入')
#                 else:
#                     spider = crawl_imge.Spider(url)
#                     print('-----------开始下载----------')
#                     res_urls = spider.parse()
#                     if res_urls:
#                         print('--------准备解析图片-----------')
#                         spider.run(res_urls)
#                         flag = True
#                         return HttpResponse('抓取完成')
#                     else:
#                         flag = True
#                         return HttpResponse('网站输入错误,请重新输入')
#             else:
#                 return HttpResponse('正在抓取中')
#
#         return HttpResponse('这样操作不可以哦')
#     except Exception as e:
#         print(e)
#         return HttpResponse('请求超时')


