from django.contrib.auth.models import User, Group
import datetime, time, requests, json, re, os, redis, pickle
from celery import Celery
from account.models import *
from invest.models import *
from urllib import parse
from disn.settings import YUNPIAN_APIKEY, PROJECT_PATH, BASE_DIR, QN_AK, QN_SK, DEBUG, ClAWLER_SCHEDULE_INTERVAL
from qiniu import Auth, BucketManager
from disn.utilities import *
from .zdmparser import *
# from .jd import *
# from .suning import *
# from .amazon import *
# from .tmall import *
# from .kaola import *
# from .yhd import *
from disn.settings import redis_instance, get_wechat_client
from lxml import etree

# Qiniu object-storage client used to mirror article images to the CDN bucket.
qiniu_auth = Auth(QN_AK, QN_SK)
bucket_manager = BucketManager(qiniu_auth)

# Celery app for the crawler tasks.
# NOTE(review): the broker password is hard-coded here — consider moving it
# into settings alongside the other credentials.
celery = Celery('tasks', broker='redis://:Schenker123@127.0.0.1:6379/0')
# Desktop Firefox user-agent sent with every crawl request.
header = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:32.0) Gecko/20100101 Firefox/32.0',}

# Shared HTTP session for all tasks in this module.
s = requests.Session()

# Retry failed connections up to 3 times for both schemes.
a = requests.adapters.HTTPAdapter(max_retries=3)
b = requests.adapters.HTTPAdapter(max_retries=3)
s.mount('http://', a)
s.mount('https://', b)

# In production, route all crawler traffic through a local SOCKS5 proxy.
if not DEBUG:
    s.proxies = {
        'http': 'socks5://127.0.0.1:2080',
        'https': 'socks5://127.0.0.1:2080'
    }


@celery.task
def update_domain():
    """Check availability of up to 20 unprocessed Domain records.

    For each record both name variants (full pinyin and abbreviation) are
    looked up on whois.chinaz.com; the expiry date / availability flags are
    stored and the record is marked processed.  The redis hash
    'processed_domain_dict' de-duplicates lookups across runs.
    """
    domain_list = Domain.objects.filter(processed=False)[:20]
    for domain in domain_list:
        result = _check_domain_whois(domain.word_pinyin)
        if result is not None:
            available, expire_date = result
            if available is not None:
                domain.domain_available = available
            if expire_date is not None:
                domain.domain_expire_date = expire_date
        time.sleep(2)  # throttle requests to whois.chinaz.com
        result = _check_domain_whois(domain.word_addreviation)
        if result is not None:
            available, expire_date = result
            if available is not None:
                domain.word_addreviation_available = available
            if expire_date is not None:
                domain.word_addreviation_expire_date = expire_date
        domain.processed = True
        domain.save()
        time.sleep(3)
    return 'Job update_domain done.'


def _check_domain_whois(word):
    """Query whois.chinaz.com for '<word>.com'.

    Returns None when the word was already processed (tracked in redis).
    Otherwise returns an (available, expire_date) tuple where either element
    may be None meaning "no information" (e.g. the expiry span could not be
    parsed from the page).
    """
    if redis_instance.hexists('processed_domain_dict', word):
        print('[%s] already processed, skipped.' % word)
        return None
    print('Processing [%s]...' % word, end='')
    redis_instance.hset('processed_domain_dict', word, True)
    url = 'http://whois.chinaz.com/%s.com' % word
    raw = s.get(url, headers=header).text
    # '过期时间' ("expiry time") on the page means the domain is registered.
    if raw.find('过期时间') > 0:
        m = re.search(r'过期时间.*?<span>(.+?)</span>', raw)
        if m:
            expire_date = datetime.datetime.strptime(m.group(1), "%Y年%m月%d日")
            print('Expired Date:', expire_date)
            return False, expire_date
        return None, None  # registered, but expiry span not parseable
    print('AVAILABLE!!!')
    return True, None


def get_now_interval():
    """Map the current hour of day to a crawl-frequency tier (1 = quietest .. 5)."""
    hour = get_current_hour()
    if 1 < hour < 5:
        return 1
    if hour in (6, 24, 0):
        return 2
    if hour in (7, 22, 23):
        return 3
    if hour in (8, 12, 18, 19, 20, 21):
        return 4
    return 5

def fetch_smzdm_image_upload_to_qiniu(article_id, image_url, datestamp):
    """Ask qiniu to fetch image_url into the 'disnlink' bucket.

    The object key is '<datestamp>/<article_id><ext>'.  Returns the CDN URL
    on success, False otherwise.
    """
    _, ext = os.path.splitext(image_url)
    key = datestamp + '/' + article_id + ext
    ret, info = bucket_manager.fetch(image_url, 'disnlink', key)
    print('Fetch image (%s), status_code=%d.' %(image_url, info.status_code))
    if info.status_code != 200:
        print('fetch image error.[%d]' %info.status_code)
        return False
    return '//qncdn.ztm.me/' + key

def fetch_smzdm_image_download_to_local(article_id, image_url):
    """Download image_url under media/z-imgs/ and return its web-accessible path."""
    _, ext = os.path.splitext(image_url)
    target_dir = '%s/media/z-imgs/' %BASE_DIR
    target_file = '%s%s%s' %(target_dir, article_id, ext)
    access_path = '/media/z-imgs/%s%s' %(article_id, ext)
    response = s.get(image_url, headers=header, stream=True)
    response.raise_for_status()
    os.makedirs(target_dir, exist_ok=True)
    with open(target_file, 'wb') as out:
        for chunk in response.iter_content(1024):
            out.write(chunk)
    return access_path


@celery.task
def print_proxy():
    """Hit an external what-is-my-IP service 20 times and print the distinct IPs seen."""
    ip_set = {s.get('http://members.3322.org/dyndns/getip').text for _ in range(20)}
    print('====>Found %d actived proxy server:\n' %len(ip_set))
    for ip in ip_set:
        print(ip)
    return 'Job print_proxy done.'

@celery.task
def delete_inactive():
    """Purge inactive Items older than 30 days (at most 500 per run).

    When an item's picture was mirrored to our CDN, the object is also
    removed from the qiniu bucket before the row is deleted.  Per-item
    failures are logged and do not abort the batch.
    """
    to_be_delete_day = datetime.date.today() - datetime.timedelta(days=30)
    items = Item.objects.filter(active=False, remark__isnull=False, create_at__lte=to_be_delete_day)[:500]
    cnt = 0
    for item in items:
        try:
            if item.qiniu_pic and item.qiniu_pic.find('//qncdn.ztm.me/') >= 0:
                # BUG FIX: was `item.replace(...)` — called on the model
                # instance (AttributeError); the CDN prefix must be stripped
                # from the qiniu_pic URL to recover the bucket key.
                qiniu_name = item.qiniu_pic.replace('//qncdn.ztm.me/', '')
                bucket_manager.delete('disnlink', qiniu_name)
            item.delete()
            cnt += 1
        except BaseException as e:
            print(item.id, item.article_id, item.article_title)
            print(e)
    print('Deleted %d inactive records.' % cnt)

@celery.task
def update_inactive():
    """Retry link resolution for inactive Items created in the last 30 days.

    go.smzdm.com affiliate links are (re-)fetched and parsed; direct
    tmall/taobao links are promoted straight to commodity_url.  Each item is
    saved whether or not it became active.
    """
    day_before_30 = datetime.date.today() - datetime.timedelta(days=30)
    in_active_list = Item.objects.filter(active=False, remark__isnull=False, create_at__gte=day_before_30)
    cnt = 0
    for smzdm in in_active_list:
        if smzdm.article_link and smzdm.article_link.find('go.smzdm.com') >= 0:
            # Only re-download the redirect page if we have no cached copy.
            if not (smzdm.remark and len(smzdm.remark) > 0):
                resp = s.get(smzdm.article_link, headers=header)
                print('====>GET(%s), status_code=%d' %(smzdm.article_link, resp.status_code))
                smzdm.remark = resp.text
                get_js_content(smzdm)
                unpacker_js_content(smzdm)
            update_sku_and_url(smzdm)
        elif smzdm.article_link and (smzdm.article_link.find('tmall') >= 0 or smzdm.article_link.find('taobao') >= 0):
            smzdm.commodity_url = smzdm.article_link
            if smzdm.commodity_url: smzdm.active = True
        if not DEBUG and smzdm.active:
            smzdm.remark = None  # drop the cached HTML once resolved
        if smzdm.active:
            print('Successfully updated in-actived item [%s]:(%s)' %(smzdm.article_id, smzdm.article_title))
            # BUG FIX: was `cnt += 0`, so the summary always reported 0.
            cnt += 1
        else:
            print('Failure updated in-actived item [%s][%s]:(%s)' %(smzdm.article_id, smzdm.article_mall, smzdm.article_title))
        smzdm.save()
    print('Successfully updated %d inactive records.' % cnt)
    return 'update_inactive done.'


# @celery.task
# def fetch_smzdm_jingxuan():
#     jingxuan_duplicated_cnt = redis_instance.get('jingxuan_duplicated_cnt')
#     if not jingxuan_duplicated_cnt:
#         jingxuan_duplicated_cnt = 0
#     jingxuan_duplicated_cnt = int(jingxuan_duplicated_cnt)
#     if jingxuan_duplicated_cnt > get_now_interval():
#         msg = 'jingxuan_duplicated_cnt=%d > interval=%d. Wait for approx %d mins.' %(jingxuan_duplicated_cnt,get_now_interval(),int(250*(jingxuan_duplicated_cnt//get_now_interval())//60))
#         jingxuan_duplicated_cnt = jingxuan_duplicated_cnt - get_now_interval()
#         redis_instance.set('jingxuan_duplicated_cnt', jingxuan_duplicated_cnt)
#         return 'Job fetch_smzdm_jingxuan delayed. %s' %msg
#     else:
#         print('Job fetch_smzdm_jingxuan start...  jingxuan_duplicated_cnt=%d <= interval=%d' %(jingxuan_duplicated_cnt, get_now_interval()))
#     time.sleep(45)
#     timesort = int(time.time())
#     jingxuan_duplicated_cnt = fetch_smzdm_json_more('http://www.smzdm.com/json_more', timesort)
#     return 'Job fetch_smzdm_jingxuan done. jingxuan_duplicated_cnt=%d, interval=%d' %(jingxuan_duplicated_cnt,get_now_interval())

@celery.task
def fetch_smzdm_faxian():
    """Crawl the smzdm 'faxian' feed with an adaptive back-off.

    The previous run's duplicate count (kept in redis) is compared against
    the current hour's interval tier: when the feed has been mostly
    duplicates we skip runs until the counter drains back down.
    """
    faxian_duplicated_cnt = redis_instance.get('faxian_duplicated_cnt')
    if not faxian_duplicated_cnt:
        faxian_duplicated_cnt = 0
    faxian_duplicated_cnt = int(faxian_duplicated_cnt)
    if faxian_duplicated_cnt > get_now_interval():
        # Still backing off: drain the counter by one interval and skip this run.
        msg = 'faxian_duplicated_cnt=%d > interval=%d. Wait for approx %d mins.' %(faxian_duplicated_cnt,get_now_interval(),int(ClAWLER_SCHEDULE_INTERVAL*(faxian_duplicated_cnt//get_now_interval())//60))
        faxian_duplicated_cnt = faxian_duplicated_cnt - get_now_interval()
        redis_instance.set('faxian_duplicated_cnt', faxian_duplicated_cnt)
        return 'Job fetch_smzdm_faxian delayed. %s' %msg
    else:
        print('Job fetch_smzdm_faxian start...  faxian_duplicated_cnt=%d <= interval=%d' %(faxian_duplicated_cnt, get_now_interval()))
    timesort = int(time.time())
    faxian_duplicated_cnt = fetch_smzdm_json_more('https://faxian.smzdm.com/json_more?type=a&timesort=%d' % timesort, timesort, 'faxian')
    if faxian_duplicated_cnt == 0:
        # Zero duplicates suggests we may have skipped articles between runs:
        # fetch an extra (non-regular) page halfway between the previous and
        # current timesort to fill the gap.
        last_timesort = redis_instance.get('last_timesort')
        if last_timesort:
            last_timesort = int(last_timesort)
            timesort = (timesort - last_timesort) // 2 + last_timesort
            print('*****Due to 0 duplicated records found, so scheduled an extra job.*****')
            faxian_duplicated_cnt = fetch_smzdm_json_more('https://faxian.smzdm.com/json_more?type=a&timesort=%d' % timesort, timesort, 'faxian', False)
    redis_instance.setex(name='last_timesort', time=1800, value=timesort) # expires after 1800 seconds
    return 'Job fetch_smzdm_faxian done. faxian_duplicated_cnt=%d, interval=%d' %(faxian_duplicated_cnt,get_now_interval())

def fetch_smzdm_json_more(url, timesort, category, is_regular=True):
    """Crawl one smzdm json_more page and persist its articles as Items.

    Already-seen articles only bump their crawl_cnt and the duplicate count.
    New articles get their affiliate link resolved to a real commodity URL,
    their picture mirrored (local disk in DEBUG, qiniu otherwise), and any
    matching keyword subscribers notified.

    Returns the number of duplicated articles seen on this page; when
    is_regular, that count also drives the '<category>_duplicated_cnt'
    back-off value in redis.
    """
    user_vs_keyword_list = UserVsKeyword.objects.filter(active=True, expire_at__gte=datetime.datetime.now())
    duplicated_cnt = 0
    datestamp = today_with_slash()
    resp = s.get(url, headers=header)
    print('##### JSON GET(%s), status_code=%d. #####' %(url, resp.status_code))
    smzdm_item_list = json.loads(resp.content)
    for item in smzdm_item_list:
        try:
            smzdm = Item.objects.get(article_id=item.get('article_id'))
            # Seen before: cap crawl_cnt at 5 and count the duplicate.
            smzdm.crawl_cnt = min(smzdm.crawl_cnt + 1, 5)
            # BUG FIX: the bumped crawl_cnt was never persisted before.
            smzdm.save()
            duplicated_cnt += 1
            print('Found Existed [%s]' %smzdm.article_title)
        except Item.DoesNotExist:
            smzdm = Item()
            smzdm.article_from = '1'  # source 1 = smzdm.com
            smzdm.article_category = category
            smzdm.article_id = item.get('article_id',None)
            smzdm.article_title = item.get('article_title',None)
            smzdm.article_price = item.get('article_price',None)
            article_summary = item.get('article_content',None)
            if article_summary:
                # Unwrap smzdm self-links first, then strip remaining HTML tags.
                # BUG FIX: the second re.sub previously ran on the raw summary,
                # discarding the result of the first substitution.
                article_summary = re.sub(r'<a.+?smzdm.+?>(.+?)</a>', r'\g<1>', article_summary)
                smzdm.article_summary = re.sub(r'</?\w+[^>]*>', '', article_summary)
            smzdm.article_date = item.get('article_date',None)
            try:
                mall, created = Mall.objects.get_or_create(name=item.get('article_mall','unknown'))
            except MultipleObjectsReturned:
                mall = Mall.objects.filter(name=item.get('article_mall','unknown')).order_by('id').first()
            smzdm.article_mall = mall
            smzdm.article_url = item.get('article_url',None)
            smzdm.article_link = item.get('article_link',None)
            try:
                channel, created = Channel.objects.get_or_create(name=item.get('article_channel_note', 'unknown'))
            except MultipleObjectsReturned:
                channel = Channel.objects.filter(name=item.get('article_channel_note','unknown')).order_by('id').first()
            smzdm.article_channel = channel
            try:
                top_category, created = Category.objects.get_or_create(name=item.get('article_top_category','unknown'))
            except MultipleObjectsReturned:
                top_category = Category.objects.filter(name=item.get('article_top_category','unknown')).order_by('id').first()
            smzdm.top_category = top_category
            gtm = item.get('gtm', None)
            if gtm:
                smzdm.sub_category = gtm.get('cates_str', None)
                try:
                    int(gtm.get('rmb_price', 0.0))
                except ValueError:
                    smzdm.rmb_price = 0.0  # non-numeric price string
                else:
                    smzdm.rmb_price = gtm.get('rmb_price', 0.0)
                try:
                    brand, created = Brand.objects.get_or_create(name=gtm.get('brand', 'unknown'))
                except MultipleObjectsReturned:
                    # BUG FIX: the fallback previously read the brand from
                    # `item` instead of `gtm`, diverging from the
                    # get_or_create key above.
                    brand = Brand.objects.filter(name=gtm.get('brand', 'unknown')).order_by('id').first()
                smzdm.brand = brand
            smzdm.timesort = timesort
            smzdm.crawl_cnt = 1
            smzdm.article_pic_style = item.get('article_pic_style', '')
            # The default 200px style does not need to be stored.
            findw = re.findall(r'200px.+?200px.+?0px', smzdm.article_pic_style.lower())
            if len(findw) > 0:
                smzdm.article_pic_style = None
            # The two feeds name the picture field differently; prefer the
            # category's primary key and fall back to the other.
            if category == 'faxian':
                smzdm.article_pic = item.get('article_pic_url', item.get('article_pic',None))
            else:
                smzdm.article_pic = item.get('article_pic', item.get('article_pic_url',None))
            try:
                if smzdm.article_link.find('go.smzdm.com') >= 0:
                    # Affiliate redirect: fetch the page and extract the real URL.
                    resp = s.get(smzdm.article_link, headers=header)
                    print('GET(%s), status_code=%d' %(smzdm.article_link, resp.status_code))
                    smzdm.remark = resp.text
                    get_js_content(smzdm)
                    unpacker_js_content(smzdm)
                    update_sku_and_url(smzdm)
                elif smzdm.article_link.find('tmall') >= 0 or smzdm.article_link.find('taobao') >= 0:
                    smzdm.commodity_url = smzdm.article_link
                    smzdm.article_link = None
                    if smzdm.commodity_url: smzdm.active = True
                else:
                    smzdm.commodity_url = smzdm.article_link
                    smzdm.article_link = None
            except Exception as e:
                smzdm.has_err = True
                smzdm.err_msg = str(e)
                smzdm.save()
            else:
                if smzdm and smzdm.active and smzdm.article_pic:
                    if DEBUG:
                        qiniu_img_url = fetch_smzdm_image_download_to_local(smzdm.article_id, smzdm.article_pic)
                    else:
                        qiniu_img_url = fetch_smzdm_image_upload_to_qiniu(smzdm.article_id, smzdm.article_pic, datestamp) # mirror the image
                    if qiniu_img_url and len(qiniu_img_url) > 0:
                        smzdm.qiniu_pic = qiniu_img_url
                        smzdm.article_pic = None
                if not DEBUG and smzdm.active:
                    smzdm.remark = None  # drop cached HTML once resolved
                try:
                    smzdm.save()
                    for user_vs_keyword in user_vs_keyword_list:
                        send_youhui_by_filter_keywords(smzdm, user_vs_keyword)
                except BaseException as e:
                    print('Error issued: %s' %str(e))
                print('<<<<<Created New Item[%s][%s][%s]>>>>>' %(smzdm.article_id, smzdm.article_mall, smzdm.article_title))
                print('-' * 100)
    if is_regular:
        # Translate this page's duplicate count into the back-off counter.
        if duplicated_cnt <= get_now_interval():
            redis_instance.set('%s_duplicated_cnt' %category, 20 // (duplicated_cnt + get_now_interval()) + get_now_interval())
        else:
            redis_instance.set('%s_duplicated_cnt' %category, duplicated_cnt)
    print('#' * 100)
    return duplicated_cnt

def send_youhui_by_filter_keywords(smzdm, user_vs_keyword):
    """Notify a subscriber when an article title matches their keyword filter.

    The title must contain the subscribed keyword, every '|'-separated
    include term, and none of the '|'-separated exclude terms (all compared
    case-insensitively).  On a match the notification task is queued and the
    subscription's notify_times is bumped.

    Refactor: the original had four copy-pasted send branches (include+exclude,
    include-only, exclude-only, neither); one include pass followed by one
    exclude pass is equivalent, since iterating an empty list is a no-op.
    """
    article_title_lower = smzdm.article_title.strip().lower()
    keyword = user_vs_keyword.keyword.name.strip().lower()
    if article_title_lower.find(keyword) < 0:
        return
    if user_vs_keyword.include:
        include_lst = [x.strip() for x in user_vs_keyword.include.strip().lower().split('|')]
    else:
        include_lst = []
    if user_vs_keyword.exclude:
        exclude_lst = [x.strip() for x in user_vs_keyword.exclude.strip().lower().split('|')]
    else:
        exclude_lst = []
    for ik in include_lst:
        if article_title_lower.find(ik) < 0:
            return  # a required term is missing
    for ek in exclude_lst:
        if article_title_lower.find(ek) >= 0:
            return  # a forbidden term is present
    sent_success = send_user_youhui.delay(user_vs_keyword.user, '您好，您关注的商品有优惠！', smzdm, '点击购买', 'https://www.ztm.me/youhui/detail/%s/' % smzdm.id)
    # NOTE(review): .delay() returns an AsyncResult, which is always truthy,
    # so notify_times is incremented regardless of whether the WeChat push
    # actually succeeded — confirm whether the task result should be awaited.
    if sent_success: # WeChat notification queued
        user_vs_keyword.notify_times = user_vs_keyword.notify_times + 1
        user_vs_keyword.save()

@celery.task
def send_template_message(user_id, template_id, data, url):
    """Push a WeChat template message to one user; errors are logged, never raised."""
    client = get_wechat_client()
    try:
        client.message.send_template(
            user_id=user_id,
            template_id=template_id,
            data=data,
            url=url)
    except Exception as exc:
        print(exc)

@celery.task
def send_user_consult_notification(first, username, content, remark, url):
    """Notify every subscribed member of the consult group about a user enquiry.

    Uses WeChat template 5hZ35k8wvyjF2b9DmTve5bhvhhB7DO0YmCKGrG9U46M, whose
    layout is:
        {{first.DATA}}
        用户名称：{{keyword1.DATA}}
        咨询内容：{{keyword2.DATA}}
        {{remark.DATA}}
    Send errors are logged per recipient and do not stop the loop.
    """
    wechat_client = get_wechat_client()
    user_group, created = Group.objects.get_or_create(name='user-consult-notification-group')
    blue = '#173177'
    params = {
        'first': {'value': first, 'color': blue},
        'keyword1': {'value': username, 'color': blue},
        'keyword2': {'value': content, 'color': blue},
        'remark': {'value': remark, 'color': blue},
    }
    if not user_group:
        return
    for member in user_group.user_set.all():
        profile = member.profile
        # Only members who follow the official account and have an openid.
        if not (profile.subscribed and profile.openid):
            continue
        try:
            wechat_client.message.send_template(
                user_id=profile.openid,
                template_id='5hZ35k8wvyjF2b9DmTve5bhvhhB7DO0YmCKGrG9U46M',
                data=params,
                url=url)
        except Exception as exc:
            print(exc)

@celery.task
def send_user_youhui(user_profile, first, commodity, remark, url):
    """Send the price-alert WeChat template for `commodity` to one profile.

    Uses template bS6p73RfRI730QzmP7rS1e7oNehuqB-urTlFFuDdR-U:
        {{first.DATA}}
        发送时间：{{keyword1.DATA}}
        报价方：{{keyword2.DATA}}
        报价产品：{{keyword3.DATA}}
        报价详情：{{keyword4.DATA}}
        {{remark.DATA}}
    Returns True when no exception was raised (including when the profile is
    filtered out), False on error.

    NOTE: the `remark` argument is currently unused — the template remark is
    built from the commodity summary instead.
    """
    wechat_client = get_wechat_client()
    crimson = '#DC143C'
    params = {
        'first': {'value': first, 'color': '#000000'},
        'keyword1': {'value': gen_timestamp(), 'color': crimson},
        'keyword2': {'value': commodity.article_mall.name, 'color': crimson},
        'keyword3': {'value': commodity.article_title, 'color': '#173177'},
        'keyword4': {'value': commodity.article_price, 'color': crimson},
        'remark': {'value': re.sub('<[^<]+?>', '', commodity.article_summary), 'color': '#696969'},
    }
    try:
        # Notifications are limited to staff profiles (category '0').
        if user_profile.subscribed and user_profile.category == '0':
            wechat_client.message.send_template(
                user_id=user_profile.openid,
                template_id='bS6p73RfRI730QzmP7rS1e7oNehuqB-urTlFFuDdR-U',
                data=params,
                url=url)
        return True
    except Exception as exc:
        print(exc)
        return False


@celery.task
def update_share_expired():
    """Deactivate type-0 UserShare records whose QR code has passed its expiry time."""
    now = datetime.datetime.now()
    for share in UserShare.objects.filter(active=True, qrcode_type='0'):
        if share.qrcode_expire_at and share.qrcode_expire_at <= now:
            share.active = False
            share.save()
            print('set %s expired.' %share)

@celery.task
def fetch_book_info_from_douban(item):
    # Stub: fetches the Douban book-search page for the item's ISBN but does
    # not yet parse the response or update `item` (`raw` is unused).
    url = 'https://book.douban.com/subject_search?search_text=%s&cat=1001' %item.isbn
    resp = s.get(url, headers=header)
    raw = resp.text

@celery.task
def fetch_book_info_by_sku(item):
    """Populate a book item from the 'jdbook' redis hash, keyed by SKU.

    Cached records look like:
    {
      "top": "童书",
      "sub": "动漫/卡通",
      "url": "https://item.jd.com/12246026.html",
      "img": "https://img11.360buyimg.com/n7/jfs/.../59cc6221Ncd97b556.jpg",
      "name": "植物大战僵尸2 武器秘密之妙语连珠成语漫画23",
      "author": " 笑江南 ",
      "pub": "中国少年儿童新闻出版总社",
      "pub_date": "2017-10",
      "sku": "12246026",
      "shop": "京东自营"
    }
    """
    # SECURITY: credentials are hard-coded and pickle.loads() runs on data
    # pulled from a shared redis instance — only safe while that redis is
    # fully trusted; consider JSON for the cache format.
    r = redis.StrictRedis(host='bw.ztm.me', port=6379, db=0, password='Schenker123')
    # BUG FIX: `key=item.sku.strip` passed the bound method object as the
    # hash key instead of the stripped string, so the lookup always missed.
    book = pickle.loads(r.hget(name='jdbook', key=item.sku.strip()))
    item.url = book.get('url','').strip()
    item.sku = book.get('sku','').strip()
    item.name = book.get('name','').strip()
    item.pic_url = book.get('img','').strip()
    item.author = book.get('author','').strip()
    item.pub = book.get('pub','').strip()
    item.pub_date = book.get('pub_date','').strip()
    # name_py, price, categories, summary, mall, grade and qiniu_pic are not
    # populated from the cache yet.
    item.active = True
    item.save()




