import ast
import datetime
import json
import os
import re
import time
import urllib.parse

import jieba
import requests
from bs4 import BeautifulSoup
from django.http import JsonResponse
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

from . import trace_product_views
from .models import *

# from selenium.webdriver.common.desired_capabilities import DesiredCapabilities


# Numeric platform ids (stored in DB rows and sent by the frontend) and the
# matching cookie-prefix group names used by store_cookies()/read_cookies().
jd_group_num = 0
tb_group_num = 1
sn_group_num = 2
jd_group = 'jd_group'
tb_group = 'tb_group'
sn_group = 'sn_group'
# Login pages per platform and the URL each platform redirects to after a
# successful login (used to detect that the QR code has been scanned).
jd_login_url = "https://passport.jd.com/new/login.aspx"
jd_after_login = "https://www.jd.com/"
tb_login_url = "https://login.taobao.com/"
tb_after_login = "https://i.taobao.com/my_itaobao"
sn_login_url = "https://passport.suning.com/ids/login"
sn_after_login = "https://www.suning.com/"
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # project root; QR-code images are saved under it
# webdriver_list = {}
# Shared module-level Chrome driver, created lazily on first use and reused
# across requests (one browser session for the whole process).
driver = None


def set_search_options():
    """Build the Chrome options shared by every scraping session.

    Runs headless so the browser works inside a container with no display.
    """
    options = Options()
    options.add_argument('--headless')
    return options


def simulate_search(search_url, input_cookies, platform: str,
                    loop_sleep_time=0.01, end_sleep_time=0.5):
    """Render a search-results page in the shared headless browser.

    Visits search_url, installs the platform's login cookies (JD only),
    reloads, then scrolls to the bottom in small steps so lazily-loaded
    items are rendered before the page source is captured.

    Returns the page HTML as UTF-8 bytes.
    """
    global driver
    if driver is None:
        driver = webdriver.Chrome(options=set_search_options())

    # First visit establishes the domain so cookies can be attached to it.
    driver.get(search_url)
    driver.delete_all_cookies()
    if platform == jd_group:
        for cookie in input_cookies:
            if cookie.get('domain') == '.jd.com':
                driver.add_cookie(cookie)
    # Reload with the session cookies in place.
    driver.get(search_url)

    # Scroll down in fixed increments to trigger lazy loading.
    total_height = driver.execute_script("return document.body.scrollHeight")
    position = 0
    step = 500
    while position < total_height:
        position = min(position + step, total_height)
        driver.execute_script(f"window.scrollTo(0, {position});")
        time.sleep(loop_sleep_time)
    time.sleep(end_sleep_time)
    return driver.page_source.encode('utf-8')


def store_cookies(cookie_list, response, group_name: str):
    """Persist browser cookies onto the HTTP response, namespaced by group.

    Each selenium cookie dict is saved under the key
    "<group_name>_<cookie name>" so read_cookies() can recover the whole
    group later. NOTE(review): the entire cookie dict is passed as the
    value (it gets stringified on the wire); read_cookies parses that
    repr back into a dict.
    """
    for item in cookie_list:
        response.set_cookie(
            key=f"{group_name}_{item.get('name')}",
            value=item,
            samesite='None',
            secure=True,
        )


def read_cookies(request, group_name):
    """Recover a platform's cookies previously saved by store_cookies().

    Cookie names are prefixed with the group name; the value is the repr
    of the original selenium cookie dict.

    Security fix: cookie values are client-controlled, so they are parsed
    with ast.literal_eval (accepts literals only) instead of eval(), which
    would execute arbitrary expressions sent by the client.
    """
    cookie_list = []
    for name, value in request.COOKIES.items():
        if name.startswith(group_name):
            cookie_list.append(ast.literal_eval(value))
    return cookie_list


@csrf_exempt
def check_cookies(request):
    """Report whether login cookies for the requested platform are stored.

    POST body: {"group_num": 0 (JD) | 2 (Suning)}.
    Returns {"is_cookie_store": bool}.
    """
    if request.method == 'POST':
        data = json.loads(request.body.decode('utf-8'))
        group_num = data.get('group_num')
        if group_num == 0:
            jd_cookies_store = read_cookies(request, jd_group)
            return JsonResponse({"is_cookie_store": bool(jd_cookies_store)}, status=200)
        elif group_num == 2:
            sn_cookies_store = read_cookies(request, sn_group)
            return JsonResponse({"is_cookie_store": bool(sn_cookies_store)}, status=200)
        # Fixed: unknown group_num previously fell through and returned
        # None, which made Django raise; report it explicitly instead.
        return JsonResponse({"error": "Unknown group_num"}, status=400)
    elif request.method == 'OPTIONS':  # fixed: HTTP method name is 'OPTIONS', not 'OPTION'
        return JsonResponse({"success": "OPTION operation"}, status=200)
    else:
        return JsonResponse({"error": "Method not allowed"}, status=405)


def init_browser(group_num: int):
    """Point the shared browser at the login page of the given platform.

    group_num 0 -> JD, 2 -> Suning; any other value is a no-op.
    """
    login_pages = {0: jd_login_url, 2: sn_login_url}
    target = login_pages.get(group_num)
    if target is not None:
        driver.get(target)


# Render the platform's login page and screenshot its QR code for scanning.
@csrf_exempt
def get_qr_code(request):
    """Screenshot the login QR code and return the saved image name.

    POST body: {"group_num": 0 (JD) | 2 (Suning), "user_id": ...}.
    The image is written to <BASE_DIR>/static/qrcode/<user_id>.png.
    """
    if request.method == 'POST':
        global driver
        data = json.loads(request.body.decode('utf-8'))
        group_num = data.get('group_num')
        user_id = data.get('user_id')
        img_name = f"{user_id}.png"
        if driver is None:
            driver = webdriver.Chrome(options=set_search_options())
        init_browser(group_num)
        time.sleep(4)  # give the login page time to render the QR code
        # CSS class of the QR <img> element differs per platform.
        qr_class_by_group = {0: 'qrcode-img', 2: 'qrCodesId'}
        qr_class = qr_class_by_group.get(group_num)
        if qr_class is not None:
            img_element = driver.find_element(By.CLASS_NAME, qr_class)
            # Fixed: build the path from components. The old literal
            # 'static\\qrcode' hard-coded Windows separators and produced a
            # broken path on Linux (where the docker deployment runs).
            img_screenshot_path = os.path.join(BASE_DIR, 'static', 'qrcode', img_name)
            print(f"-------------image stored in : {img_screenshot_path}-------------\n")
            img_element.screenshot(img_screenshot_path)  # save the QR image
        return JsonResponse({
            'qr_img_name': img_name,
        }, status=200)
    elif request.method == 'OPTIONS':  # fixed: HTTP method name is 'OPTIONS', not 'OPTION'
        return JsonResponse({"success": "OPTION operation"}, status=200)
    else:
        return JsonResponse({"error": "Method not allowed"}, status=405)


# Check whether the user has completed the QR login in the shared browser.
@csrf_exempt
def check_login_status(request):
    """Report login status for the platform given by group_num.

    A platform is considered logged in once the browser has been redirected
    to its post-login landing URL; the session cookies are then copied onto
    the response via store_cookies().
    """
    if request.method == 'POST':
        global driver
        data = json.loads(request.body.decode('utf-8'))
        group_num = data.get('group_num')
        response = JsonResponse({'is_login': False}, status=200)
        if driver is None:
            driver = webdriver.Chrome(options=set_search_options())

        # Suning login detection is disabled: always report "not logged in".
        # NOTE(review): this early return made the old `group_num == 2`
        # branch below unreachable; that dead branch has been removed.
        if group_num == 2:
            return response

        print(driver.current_url)
        # JD redirects to its home page once the QR code has been scanned.
        if group_num == 0:
            if driver.current_url == jd_after_login:
                response = JsonResponse({'is_login': True}, status=200)
                store_cookies(driver.get_cookies(), response, jd_group)
        return response
    elif request.method == 'OPTIONS':  # fixed: HTTP method name is 'OPTIONS', not 'OPTION'
        return JsonResponse({"success": "OPTION operation"}, status=200)
    else:
        return JsonResponse({"error": "Method not allowed"}, status=405)


@csrf_exempt
def close_webdriver(request):
    """No-op endpoint kept for the frontend's "close browser" call.

    The shared module-level driver is deliberately left running so it can
    be reused across requests; this view only acknowledges the call.
    """
    if request.method == 'POST':
        # Fixed: the POST acknowledgement previously said "OPTION operation".
        return JsonResponse({"success": "POST operation"}, status=200)
    elif request.method == 'OPTIONS':  # fixed: HTTP method name is 'OPTIONS', not 'OPTION'
        return JsonResponse({"success": "OPTION operation"}, status=200)
    else:
        return JsonResponse({"error": "Method not allowed"}, status=405)


def extract_id_from_url(url):
    """Pull the `id` query parameter out of a (possibly HTML-escaped) URL.

    Returns the first `id` value as a string, or None when the URL carries
    no `id` parameter.
    """
    # URLs scraped out of HTML may still contain the entity form '&amp;'.
    normalized = url.replace('&amp;', '&')
    query = urllib.parse.urlparse(normalized).query
    id_values = urllib.parse.parse_qs(query).get('id')
    return id_values[0] if id_values else None


def save_product(product_info: dict, group_num: int, search_word: str):
    """Upsert a scraped product row and append a once-per-day history row.

    product_info carries 'product_id', 'name', 'price', 'pic_url',
    'merchant'; group_num is the platform id; search_word is the query
    that produced the hit.
    """
    filter_product = Product.objects.filter(
        search_word=search_word,
        platform=group_num,
        product_id=product_info.get('product_id'),
    )
    if filter_product.exists():
        print("start modify")
        # Fixed: fetch ONE instance and mutate it. The old code indexed
        # `filter_product[0]` on every line, which re-ran the query and
        # returned a fresh object each time, so the assignments were lost
        # before save(). The old trailing commas also turned every assigned
        # value into a 1-tuple instead of the value itself.
        product = filter_product.first()
        product.name = product_info.get('name')
        product.price = product_info.get('price')
        product.pic_url = product_info.get('pic_url')
        product.merchant = product_info.get('merchant')
        product.add_date = datetime.date.today()
        product.save()
    else:
        new_product = Product(
            search_word=search_word,
            platform=group_num,
            product_id=product_info.get('product_id'),
            name=product_info.get('name'),
            price=product_info.get('price'),
            pic_url=product_info.get('pic_url'),
            merchant=product_info.get('merchant'),
        )
        new_product.save()
    # Record at most one price-history row per product per day.
    today = datetime.date.today()
    filter_history_product = HistoryProduct.objects.filter(
        platform=group_num,
        product_id=product_info.get('product_id'),
        add_date=today,
    )
    if not filter_history_product.exists():
        new_history_product = HistoryProduct(
            platform=group_num,
            product_id=product_info.get('product_id'),
            price=product_info.get('price'),
        )
        new_history_product.save()


def get_product_url_from_id(product_id: str, group: str):
    """Build the product detail-page URL for a platform group key.

    Returns an empty string for an unrecognized group.
    """
    if group == jd_group:
        return f"https://item.jd.com/{product_id}.html"
    if group == tb_group:
        return f"https://detail.tmall.com/item.htm?id={product_id}"
    if group == sn_group:
        # Suning ids look like "xxx-yyy"; the page lives at .../xxx/yyy.html
        parts = product_id.split('-')
        return f"https://product.suning.com/{parts[0]}/{parts[1]}.html"
    return ""


def jd_search(search_word: str, jd_cookies_store, max_count=20):
    """Scrape JD search results for one keyword.

    Renders the results page in the shared browser with the stored login
    cookies, parses up to max_count items, persists each via save_product(),
    and returns a list of product dicts for the frontend.
    """
    jd_search_url = "https://search.jd.com/Search?keyword=" + search_word + "&enc=utf-8"
    html = simulate_search(jd_search_url, jd_cookies_store, jd_group)
    # Parse the rendered search results.
    soup = BeautifulSoup(html, 'html5lib')
    # Each result is an <li class="gl-item"> in JD's results page markup.
    products = soup.find_all('li', class_='gl-item')
    # Collect product information.
    product_list = []
    count = 0
    for product in products:
        p_name = product.find('div', class_='p-name p-name-type-2').find('em').get_text(strip=True)  # product name + description
        # NOTE(review): get_text() returns a string, never None, so this
        # guard is always true; kept as-is to preserve behavior.
        if p_name is not None:
            price = product.find('div', class_='p-price').find('i').get_text(strip=True)  # product price
            p_id = json.loads(product.find('div', class_='gl-i-wrap')['data-buried']).get('skuid')  # product id
            pic_url_wrapper = product.find('div', class_="p-img").find('img')  # product image URL
            pic_url_src = pic_url_wrapper.get('src')
            # Lazily-loaded images carry the real URL in data-lazy-img instead of src.
            pic_url_raw = pic_url_wrapper.get('data-lazy-img') if pic_url_src is None else pic_url_src
            # Protocol-relative URLs ("//img...") get an explicit https scheme.
            pic_url = "https:" + pic_url_raw if pic_url_raw.startswith("//") else pic_url_raw
            merchant = product.find('div', class_='p-shop').get_text(strip=True)  # selling merchant
            # Notify price-trace bookkeeping about the freshly observed price.
            trace_product_views.inspect_trace_products(p_id, float(price))
            product_info_store = {
                "name": p_name,
                'price': float(price),
                "product_id": p_id,
                "pic_url": pic_url,
                'merchant': merchant,
            }
            product_info_return = {
                "name": p_name,
                'price': float(price),
                'group_num': jd_group_num,
                'product_id': p_id,
                "product_url": get_product_url_from_id(p_id, jd_group),
                "pic_url": pic_url,
                'merchant': merchant,
            }
            save_product(product_info_store, jd_group_num, search_word)
            product_list.append(product_info_return)
            count = count + 1
            if count >= max_count:
                return product_list
    return product_list


def jd_search_list(search_words: list, jd_cookies_store, max_count=20):
    """Search JD for every word, serving cached DB rows when fresh.

    Rows younger than two days are returned straight from the database;
    stale rows are deleted and the word is re-scraped via jd_search().
    """
    results = []
    for word in search_words:
        cached = Product.objects.filter(
            platform=jd_group_num,
            search_word=word,
        )
        is_fresh = (cached.exists()
                    and datetime.date.today() - cached[0].add_date < timezone.timedelta(days=2))
        if is_fresh:
            results.extend({
                "name": row.name,
                'price': row.price,
                'group_num': row.platform,
                'product_id': row.product_id,
                "product_url": get_product_url_from_id(row.product_id, jd_group),
                "pic_url": row.pic_url,
                'merchant': row.merchant,
            } for row in cached)
        else:
            Product.objects.filter(search_word=word, platform=jd_group_num).delete()
            results.extend(jd_search(word, jd_cookies_store, max_count))
    return results


def sn_search(search_word: str, sn_cookies_store, max_count=20):
    """Scrape Suning search results for one keyword.

    Renders the results page, parses up to max_count items (skipping any
    without a parseable decimal price), persists each via save_product(),
    and returns a list of product dicts for the frontend.
    """
    sn_search_url = "https://search.suning.com/" + search_word + "/"
    html = simulate_search(sn_search_url, sn_cookies_store, sn_group, end_sleep_time=4)
    # Parse the rendered search results.
    soup = BeautifulSoup(html, 'html5lib')
    # Each result is a <div class="item-bg"> in Suning's results page markup.
    count = 0
    product_list = []
    products = soup.find_all('div', class_='item-bg')
    for product in products:
        p_name = product.find('div', class_='title-selling-point').get_text(strip=True)  # product name + description
        price = product.find('span', class_='def-price').get_text(strip=True)  # product price (raw text)
        describe = product.find('div', class_='info-config')
        describe = describe.get_text(strip=True) if describe is not None else describe
        p_id = product.find('div', class_='res-opt one-third').find('a', class_='btn-db').get('id')
        pic_url_raw = product.find('div', class_="img-block").find('img').get('src')  # product image URL
        # Protocol-relative URLs ("//img...") get an explicit https scheme.
        pic_url = "https:" + pic_url_raw if pic_url_raw.startswith("//") else pic_url_raw
        merchant = product.find('div', class_='store-stock').get_text(strip=True)  # selling merchant
        pattern = r"\d+\.\d+"  # regex matching a decimal-point float inside the price text
        price_num = re.search(pattern, price)
        price_num = price_num.group() if price_num is not None else price_num
        # Items without a parseable price (e.g. "pre-sale") are skipped.
        if price_num is not None:
            # Notify price-trace bookkeeping about the freshly observed price.
            trace_product_views.inspect_trace_products(p_id, float(price_num))
            product_info_store = {
                "name": p_name + " (" + describe + ")" if describe is not None else p_name,
                'price': float(price_num),
                "product_id": p_id,
                "pic_url": pic_url,
                'merchant': merchant,
            }
            product_info_return = {
                "name": p_name + " (" + describe + ")" if describe is not None else p_name,
                'price': float(price_num),
                'group_num': sn_group_num,
                "product_id": p_id,
                "product_url": get_product_url_from_id(p_id, sn_group),
                "pic_url": pic_url,
                'merchant': merchant,
            }
            save_product(product_info_store, sn_group_num, search_word)
            product_list.append(product_info_return)
            count = count + 1
            if count >= max_count:
                return product_list
    # Printing product_list may fail on consoles without UTF-8 support.
    # print(product_list)
    return product_list


def sn_search_list(search_words: list, sn_cookies_store, max_count=20):
    """Search Suning for every word, serving cached DB rows when fresh.

    Rows younger than two days are returned straight from the database;
    stale rows are deleted and the word is re-scraped via sn_search().
    """
    results = []
    for word in search_words:
        cached = Product.objects.filter(
            platform=sn_group_num,
            search_word=word,
        )
        is_fresh = (cached.exists()
                    and datetime.date.today() - cached[0].add_date < timezone.timedelta(days=2))
        if is_fresh:
            results.extend({
                "name": row.name,
                'price': row.price,
                'group_num': row.platform,
                'product_id': row.product_id,
                "product_url": get_product_url_from_id(row.product_id, sn_group),
                "pic_url": row.pic_url,
                'merchant': row.merchant,
            } for row in cached)
        else:
            Product.objects.filter(search_word=word, platform=sn_group_num).delete()
            results.extend(sn_search(word, sn_cookies_store, max_count))
    return results


# Product-search view. The POSTed JSON body carries:
# * is_jd_search: whether to search products on JD.com
# * is_sn_search: whether to search products on Suning.com
# * sort_type: "ascending" / "descending" to sort the merged results by price
# * search_info: the raw search text entered by the user
# (Taobao search is not currently handled by this view.)
@csrf_exempt
def search(request):
    """Run the multi-platform product search and return merged results.

    Splits the query into words with jieba, searches each enabled platform
    (JD requires stored login cookies; if they are missing, is_jd_search is
    flipped to False so the frontend can prompt for login), optionally sorts
    by price, and returns the combined result list.
    """
    print('----------searching...----------')
    success_response_dict = {"success": "POST operation"}
    if request.method == 'POST':
        data = json.loads(request.body.decode('utf-8'))
        is_jd_search = data.get('is_jd_search')
        is_sn_search = data.get('is_sn_search')
        sort_type = data.get('sort_type')  # "ascending" / "descending" / other
        search_result = []
        search_info = data.get('search_info')
        # Segment the query so each word is searched independently.
        search_words = jieba.lcut(search_info)
        # JD results: only available with stored login cookies.
        if is_jd_search:
            print('in jd_search')
            jd_cookies_store = read_cookies(request, jd_group)
            if not jd_cookies_store:
                is_jd_search = False  # report back that JD was skipped
            else:
                jd_search_result = jd_search_list(search_words, jd_cookies_store)
                search_result = search_result + jd_search_result
        # Suning results: searchable without login cookies.
        if is_sn_search:
            sn_cookies_store = read_cookies(request, sn_group)
            sn_search_result = sn_search_list(search_words, sn_cookies_store)
            search_result = search_result + sn_search_result

        if sort_type == "ascending":
            search_result.sort(key=lambda k: k['price'])
        if sort_type == "descending":
            search_result.sort(key=lambda k: k['price'], reverse=True)
        success_response_dict['search_result'] = search_result
        success_response_dict['is_jd_search'] = is_jd_search
        success_response_dict['is_sn_search'] = is_sn_search
        response = JsonResponse(success_response_dict, status=200)
        print('----------search done----------')
        print(f"{len(search_result)} items selected")  # fixed: missing space in log line
        return response
    elif request.method == 'OPTIONS':  # fixed: HTTP method name is 'OPTIONS', not 'OPTION'
        return JsonResponse({"success": "OPTION operation"}, status=200)
    else:
        return JsonResponse({"error": "Method not allowed"}, status=405)
