# coding=utf-8
#import sys;
#sys.path.append('../')  # NoQA

# Bootstrap Django BEFORE any project import below: point
# DJANGO_SETTINGS_MODULE at the spider's settings and initialise the app
# registry so the ORM models (spider.models) can be imported at module level.
import os;
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "auto_sale_spider.settings")  # NoQA
import django;
django.setup()  # NoQA

from selenium import webdriver
import time
import requests
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from spider.spider_selenium.html_parser_selenium import *
import time
from spider.models import *
import logging
import sys
import auto_sale_spider.settings as settings
import urllib2, urllib
import os
import selenium
# from selenium.webdriver.common.action_chains import action_chains
# Python 2 hack: site.py deletes sys.setdefaultencoding at startup;
# reload(sys) restores it so the process-wide str<->unicode codec can be
# forced to UTF-8 (needed for the Chinese text handled below).
reload(sys)
sys.setdefaultencoding('utf-8')

class HtmlDownloader(object):
    """Downloads Taobao shop and product pages and stores the parsed results.

    Two download strategies are used:
      * the shop's ajax search endpoint (plain ``requests``) for product
        list pages, and
      * a real Chrome browser driven by selenium for pages that need
        scrolling/clicking (product details, stock per size/colour).

    NOTE(review): every selenium method assumes a chromedriver binary at
    ``CHROMEDRIVER_PATH`` -- confirm on each deployment machine.
    """

    # Single place for the chromedriver location (was duplicated in two
    # methods); raw string so the backslashes are unambiguous.
    CHROMEDRIVER_PATH = r'F:\chromedriver_win32\chromedriver.exe'

    def __init__(self):
        # Project-local html parser (spider.spider_selenium.html_parser_selenium).
        self.parser = HtmlParser()
        # When False, only net paths are stored and image bytes are not
        # written to disk.
        self.is_download_image_to_local = False

    def writeInFile(self, fileName, content):
        """Write *content* to the file named *fileName*, overwriting it.

        Uses ``with`` so the handle is closed even if write() raises
        (the original leaked the handle on error).
        """
        with open(fileName, 'w') as f:
            f.write(content)

    ##############################################################################
    #                       ajax download of the product list
    ##############################################################################

    def get_json_text(self, url):
        """GET *url* and return the body decoded as GB2312, or None on a
        network error.

        The shop's asynSearch endpoint answers with GB2312-encoded jsonp.
        """
        headers = {'user-agent': "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
                   'Accept-Language': "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3"}
        try:
            r = requests.get(url, headers=headers, timeout=30)
            r.encoding = r.apparent_encoding

            json_text = r.content.decode("GB2312")

            return json_text
        except requests.exceptions.RequestException as e:
            # Log (instead of the original py2-only ``print e``) so failures
            # land in the crawl log; falls through returning None.
            logging.error('get_json_text failed for %s: %s', url, e)

    # Download every product-list page through the ajax endpoint.
    def download_productList_html_list_by_ajax(self, shop):
        """Return the raw jsonp payload of every product-list page of *shop*.

        Pages are fetched until the endpoint answers with the
        "item-not-found" placeholder (or a request fails).

        Raises:
            Exception: if the shop has no url.
        """
        url = shop.url
        if url is None:
            raise Exception('download_productList_html_list 商店的url是None')

        # Carve scheme+host out of the shop url, e.g.
        # https://shop166038364.taobao.com/shop/view_shop.htm?shop_id=...
        #   -> https://shop166038364.taobao.com
        # (searching for '/' from position 8 skips the '//' of the scheme).
        third_sprit_index = url.find('/', 8)
        base_url = url[:third_sprit_index] + '/i/asynSearch.htm?spm=a1z10.3-c.w4002-14695781431.116.3ff19e11lfUZv0&_ksTS=1504430525186_218&callback=jsonp219&mid=w-14695781431-0&wid=14695781431&path=%2Fsearch.htm&search=y'
        page_num = 1
        html_list = []
        while True:
            url = base_url + '&pageNo=' + str(page_num)
            page_num = page_num + 1
            logging.info('商店商品列表url=' + url)
            json_text = self.get_json_text(url)

            # get_json_text returns None on a network error; stop paging
            # instead of crashing BeautifulSoup below (original bug).
            if json_text is None:
                break

            # Debug dump of the last fetched page.
            self.writeInFile('log.txt', json_text)

            # A page carrying the "item-not-found" placeholder
            # (p class=[u'\\"item-not-found\\"']) marks the end of the list.
            soup = BeautifulSoup(json_text, 'html.parser')
            p_elem = soup.find('p', u'\\"item-not-found\\"')
            if p_elem is not None:
                break

            html_list.append(json_text)
        logging.info('download_product_html_list 商品列表页面数量：' + str(len(html_list)) + '页')

        return html_list

    ##############################################################################
    #                     selenium download of the product list
    ##############################################################################

    def download_productList_html_list(self, shop):
        """Page through *shop*'s product list in Chrome and return each
        page's html source.

        The loop ends when the "next page" button cannot be located within
        the wait timeout (TimeoutException).

        Raises:
            Exception: if the shop has no url.
        """
        url = shop.url
        if url is None:
            raise Exception('download_productList_html_list 商店的url是None')
        html_list = []

        driver = webdriver.Chrome(self.CHROMEDRIVER_PATH)
        driver.get(url)
        wait = WebDriverWait(driver, 10)
        try:
            while True:
                # Snapshot this page's html.
                html_list.append(driver.page_source)

                # Scroll down a few screens so lazily-loaded items render.
                for _ in range(4):
                    driver.execute_script("window.scrollBy(0,2000)")
                    time.sleep(1)

                # Jump to the next page: <a class="J_SearchAsync next">.
                # When the button is absent the wait raises TimeoutException
                # and the loop ends.
                button_next_page = wait.until(
                    EC.presence_of_element_located((By.CSS_SELECTOR, 'a.J_SearchAsync.next')))
                button_next_page.click()
        except TimeoutException:
            # No next-page button: every page has been collected.
            logging.info('此商店的商品列表获取完毕')
        finally:
            driver.close()

        logging.info('download_product_html_list 商品列表页面数量：' + str(len(html_list)) + '页')
        return html_list

    ##############################################################################
    #                     selenium download of product details
    ##############################################################################

    def get_or_create_product_image_folder(self, product):
        """Return (creating it if needed) the local folder storing the
        images of *product*."""
        folder_name = 'id=' + str(product.id) + ' name=' + product.name.strip()
        # Let os.path.join pick the platform separator; the original glued
        # '\\' into the joined component, breaking non-Windows hosts.
        folder_path = os.path.join(settings.MEDIA_ROOT, 'product_images', folder_name)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        return folder_path

    def download_image(self, img_net_path, img_local_path):
        """Fetch *img_net_path* and store it at *img_local_path*; skipped
        when the local file already exists.

        Raises:
            urllib2.HTTPError: when the remote image does not exist
                (handled by the booth-image caller).
        """
        if os.path.exists(img_local_path):
            logging.info('此图名称已经存在 img_net_path=' + img_net_path)
        else:
            f = urllib2.urlopen(img_net_path)
            try:
                data = f.read()
            finally:
                # The original never closed the response handle.
                f.close()
            with open(img_local_path, "wb") as imgfile:
                imgfile.write(data)

    def download_and_parse_size_color_inventory(self, product, driver):
        """Crawl the size/colour variants of *product* and the stock of every
        combination, creating an Inventory row per combination.

        Clicks each size link, then each colour link, and reads the stock
        counter (#J_SpanStock) that the page refreshes after every click.
        Also stores the product's total stock on ``product.count``.
        """
        logging.info('start download_and_parse_size_color_inventory')
        # Wait until the sku block is present:
        # <div id="J_isku" class="tb-key tb-key-sku" shortcut-key="i">
        locator = (By.ID, 'J_isku')
        WebDriverWait(driver, 20, 0.5).until(EC.presence_of_element_located(locator))
        # Scroll a little so long size/colour lists do not leave their links
        # outside the viewport (which would make them unclickable).
        driver.execute_script("window.scrollBy(0,200)")

        # Parse the size and colour variant lists out of the page source.
        logging.info('爬取尺寸')
        size_list, color_list = self.parser.parse_product_html(driver.page_source, product)

        # <div class="tb-skin"> wraps the whole sku area.
        kind_div = driver.find_element_by_css_selector('div.tb-skin')

        # Total stock: <span id="J_SpanStock" class="tb-count">15</span>
        stock_span_elem = kind_div.find_element_by_id('J_SpanStock')
        product_count = stock_span_elem.text
        logging.info('product_count=' + product_count)
        product.count = int(product_count)
        product.save()

        # Variant link lists, e.g.
        # <ul data-property="尺码/鞋码" class="J_TSaleProp tb-clearfix">
        # <ul data-property="颜色分类" class="J_TSaleProp tb-img tb-clearfix">
        ul_elems = kind_div.find_elements_by_css_selector('ul.J_TSaleProp.tb-clearfix')

        size_length = len(size_list)
        color_length = len(color_list)
        # With zero or one size (resp. colour) there is nothing to click.
        size_a_elems = []   # <a> links of the size list
        color_a_elems = []  # <a> links of the colour list
        if size_length > 1:
            # When sizes exist they are always the first <ul>.
            size_a_elems = ul_elems[0].find_elements_by_tag_name('a')
            if color_length > 1:
                color_a_elems = ul_elems[1].find_elements_by_tag_name('a')
        else:
            if color_length > 1:
                # A single size <ul> may still be present: the colour <ul> is
                # then the second element, otherwise it is the only one.
                if len(ul_elems) == 2:
                    color_a_elems = ul_elems[1].find_elements_by_tag_name('a')
                elif len(ul_elems) == 1:
                    color_a_elems = ul_elems[0].find_elements_by_tag_name('a')

        logging.info('爬取库存')
        # Walk every size x colour combination and record its stock.
        for size_index in range(size_length):
            if size_length > 1:
                size_a_elems[size_index].click()
            for color_index in range(color_length):
                if color_length > 1:
                    try:
                        color_a_elems[color_index].click()
                    except selenium.common.exceptions.WebDriverException:
                        # The link was pushed out of view by a long variant
                        # list; scroll and retry the click once.
                        driver.execute_script("window.scrollBy(0,200)")
                        logging.info('颜色尺寸较多，需要向下滑动，点击颜色和尺寸')
                        color_a_elems[color_index].click()

                # Stock of the currently selected combination.
                stock_span_elem = kind_div.find_element_by_id('J_SpanStock')
                stock_str = stock_span_elem.text
                logging.info(
                    color_list[color_index].name + ' ,' + size_list[size_index].name + '库存是' + stock_str)

                Inventory.objects.create(count=int(stock_str), product=product, size=size_list[size_index],
                                         color=color_list[color_index])

    def download_and_parse_detail_images(self, product, driver):
        """Collect the description-area images of *product* and store a
        DetailImage row per image (optionally downloading the files)."""
        logging.info('start download_and_parse_detail_images')
        # Image container: <div id="description" class="J_DetailSection ...">
        locator = (By.ID, 'description')
        div_description_elem = WebDriverWait(driver, 20, 0.5).until(EC.presence_of_element_located(locator))
        img_elem_list = div_description_elem.find_elements_by_tag_name('img')
        # Guard: a description without images would crash the [-1] below.
        if not img_elem_list:
            logging.info('download_and_parse_detail_images: no images found')
            return
        last_img_elem = img_elem_list[-1]

        # Scroll down (up to 19 screens) until the LAST image has a real
        # .jpg src, i.e. lazy loading has finished.
        for _ in range(19):
            driver.execute_script("window.scrollBy(0,2000)")
            time.sleep(1)
            img_src = last_img_elem.get_property('src')
            if img_src.endswith('.jpg'):
                break

        folder_path = self.get_or_create_product_image_folder(product)
        for index, img_elem in enumerate(img_elem_list):
            img_net_path = img_elem.get_property('src')
            # enumerate() replaces the original list.index() lookup, which
            # was O(n) and mis-numbered duplicate srcs in the log line.
            logging.info(
                '一共%d张图片，下载第%d张图片img src=%s' % (len(img_elem_list), index + 1, img_net_path))
            # Local name: reuse Taobao's file name from the url.
            img_name = img_net_path[img_net_path.rindex('/') + 1:]
            img_local_path = os.path.join(folder_path, img_name)
            if self.is_download_image_to_local:
                self.download_image(img_net_path, img_local_path)
                DetailImage.objects.get_or_create(product=product, local_path=img_local_path, net_path=img_net_path, is_valid=False)
            else:
                DetailImage.objects.get_or_create(product=product,
                                                  net_path=img_net_path, is_valid=False)

    def download_and_parse_booth_image(self, product, driver):
        """Collect the thumbnail ("booth") images at the top of the product
        page and store a BoothImage row per image, requesting the 400x400
        rendition instead of the 50x50 thumbnail."""
        logging.info('start download_and_parse_booth_image')
        # <ul id="J_UlThumb" class="tb-thumb tb-clearfix"> holds <img> whose
        # src looks like ...TB2...jpg_50x50.jpg_.webp
        ul_elem = driver.find_element_by_id('J_UlThumb')
        img_elem_list = ul_elem.find_elements_by_tag_name('img')

        folder_path = self.get_or_create_product_image_folder(product)
        for index, img_elem in enumerate(img_elem_list):
            src_text = img_elem.get_property('src')

            # Drop the trailing '_.webp' by cutting at the LAST '.jpg'; the
            # scheme-less branch additionally cuts at the FIRST '.jpg'
            # (removing the '_50x50.jpg' suffix as well) -- behaviour kept
            # from the original.
            src_text_finished_tail = src_text[:src_text.rindex('.jpg') + 4]
            img_net_path = ''
            if src_text_finished_tail[:6] != 'https:':
                logging.info('没有https:')
                img_net_path = 'https:' + src_text_finished_tail[:src_text_finished_tail.index('.jpg') + 4]
            else:
                logging.info('有https:')
                img_net_path = src_text_finished_tail

            # Ask for the larger rendition.
            img_net_path = img_net_path.replace('50x50', '400x400')

            logging.info(
                '一共%d张图片，下载第%d张图片img src=%s' % (
                    len(img_elem_list), index + 1, img_net_path))
            # Local name: reuse Taobao's file name from the url.
            img_name = img_net_path[img_net_path.rindex('/') + 1:]
            img_local_path = os.path.join(folder_path, img_name)
            try:
                if self.is_download_image_to_local:
                    self.download_image(img_net_path, img_local_path)
                    BoothImage.objects.get_or_create(product=product, local_path=img_local_path, net_path=img_net_path, is_valid=False)
                else:
                    BoothImage.objects.get_or_create(product=product, net_path=img_net_path, is_valid=False)

            except urllib2.HTTPError:
                # The 400x400 rendition may not exist; record nothing.
                logging.info('图片不存在')

    def download_and_parse_product_detail(self, product):
        """Open *product*'s page in Chrome and crawl stock, booth images and
        detail images, then mark the product as crawled.

        A TimeoutException anywhere is treated as "the product was taken off
        the shelves" and only logged; the browser is always closed.

        Raises:
            Exception: if the product has no url.
        """
        logging.info('download_and_parse_product_detail 开始爬取' + product.name)
        url = product.url
        if url is None:
            raise Exception('下载商品细节时，商品url为None')

        driver = webdriver.Chrome(self.CHROMEDRIVER_PATH)
        driver.get(url)

        try:
            self.download_and_parse_size_color_inventory(product, driver)

            self.download_and_parse_booth_image(product, driver)

            self.download_and_parse_detail_images(product, driver)

            product.is_crawled_detail = True
            product.save()

        except TimeoutException:
            logging.info('该商品可能已经下架')
            logging.info('下架商品名称 productName=' + product.name)
            logging.info('下架商品地址 productUrl=' + product.url)

        finally:
            driver.close()

        logging.info('end download_and_parse_product_detail')

def download_one_product(product_id):
    """Crawl the detail page of the single product with database id *product_id*."""
    target = Product.objects.get(id=product_id)
    HtmlDownloader().download_and_parse_product_detail(target)

def download_product_from_this(product_id):
    """Crawl detail pages for every product from 1-based position
    *product_id* through the END of the product list.

    Bug fix: the original iterated ``range(len(product_list))[product_id :
    len(product_list) - 1]``, whose exclusive upper bound silently skipped
    the last product. The downloader is also built once instead of once per
    product.
    """
    product_list = Product.objects.all()
    downloader = HtmlDownloader()
    start_index = product_id - 1  # convert 1-based position to 0-based index
    for i in range(start_index, len(product_list)):
        logging.info('spider i=' + str(i))
        downloader.download_and_parse_product_detail(product_list[i])

if __name__ == '__main__':
    # Entry point: crawl the detail pages of all products, starting from the
    # first one. (Uncomment the line below to crawl a single product instead.)
    # download_one_product(2)
    download_product_from_this(1)

