# Selenium官网： https://selenium-python.readthedocs.io/
#
# SeleniumPython文档（英文版）：http://selenium-python.readthedocs.org/index.html
#
# SeleniumPython文档（中文版）：https://selenium-python-zh.readthedocs.io/en/latest/faq.html
#
# Selenium 基本操作：https://www.yukunweb.com/2017/7/python-spider-Selenium-PhantomJS-basic/
#
# Selenium爬取淘宝信息实战：https://cuiqingcai.com/2852.html
#电影小爬虫
#作者：李广
#时间：2024-05-17 10:00
#说明：用 Selenium 抓取 cavbd.cn 的周榜/日榜/季榜三个排行页，
#     每个榜单页由一个独立的浏览器线程处理，结果目前仅打印
#     NOTE(review): 原说明（电影天堂 1-300 页、存入 Sqlite）与下方代码不符，已按实际行为更正
from fake_useragent import UserAgent
import asyncio
import time
import aiohttp
from lxml import etree
import random
import sqlite3
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from time import sleep
import threading
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium import webdriver
import requests
import time
from lxml import etree
from  selenium.common.exceptions import NoSuchElementException
from util.date  import *



#url_news = input('Target URL: ')  # target page - newest
url_top = ''     # hottest
url_topnum = ''  # most viewed

# Example sources kept for reference:
#   https://bbs.3dmgame.com/forum-3492-{}.html  (novels)
#   https://www.dy2018.com/19/                  (movies)
import re

sem = threading.Semaphore(2)  # cap the number of scraper threads running at once

# Compiled once at import time instead of re-compiling on every call.
_WHITESPACE_RE = re.compile(r'\s+')

def reduce_spaces(s):
    """Collapse every run of whitespace in *s* into a single space and strip both ends."""
    return _WHITESPACE_RE.sub(' ', s).strip()

value_list = []  # scraped record dicts accumulated across threads (currently unused)
#browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
class JdSpider:
    """Scrape the weekly / daily / quarterly ranking pages of cavbd.cn.

    One Chrome WebDriver (each with its own chromedriver Service) is started
    per ranking URL; the three page fetches are fanned out over threads, with
    the module-level semaphore capping how many browsers run simultaneously.
    """

    def __init__(self):
        # Date fragments come from the util.date helpers (star-imported above).
        week_str = get_current_mon_sun()
        day_str = get_current_day()
        season_str = get_current_season()
        url_1 = f'http://www.cavbd.cn/ranking/weekly/{week_str}.html'
        url_2 = f'http://www.cavbd.cn/ranking/daily/{day_str}.html'
        url_3 = f'http://www.cavbd.cn/ranking/quarterly/2024Q{season_str}CCTV.html'
        self.urls = [url_1, url_2, url_3]
        self.url = url_1  # kept for backward compatibility with older callers
        # One Service object per worker: a single Service instance must not be
        # shared by drivers that run concurrently.
        self.services = [Service('./chromedriver.exe') for _ in range(3)]

    def get_page(self, i, driver):
        """Open the i-th ranking page with *driver* and print its table rows.

        Fixes vs. the original implementation:
        * the per-row cell lookup used the absolute XPath '//td', which
          matches every <td> of the whole document rather than the current
          row's cells — the '(j-1)*6'/'(j-1)*5' offset arithmetic tried to
          compensate and mis-indexed the 5-column rows; it is now the
          relative './/td', so no offsets are needed;
        * 5-column rows no longer print a stale `channel` value carried over
          from a previous 6-column row (a NameError risk on the first row).
        """
        driver.get(self.urls[i])
        time.sleep(6)  # crude wait for the JS-rendered table
        # NOTE(review): prefer WebDriverWait + expected_conditions here.

        rows = driver.find_elements(By.XPATH, '//div[@class="table"]//tbody/tr')
        name_list = []
        for tr in rows:
            tds = tr.find_elements(By.XPATH, './/td')  # cells of THIS row only
            if len(tds) < 5:
                continue  # header / malformed row
            rank_id = tds[0].text
            name = tds[1].text
            num = tds[2].text
            rate = tds[3].text
            prise = tds[4].text
            # Some rows carry a sixth "channel" column, some only five.
            channel = tds[5].text if len(tds) >= 6 else ''
            name_list.append(f"{rank_id} {name} {num} {rate} {prise} {channel} ")
        print(name_list)

    def browser_work(self, i):
        """Start a Chrome driver for worker *i*, scrape its page, then quit."""
        options = webdriver.ChromeOptions()
        service = self.services[i]
        options.add_argument('--disable-javascript')   # the table renders without JS
        # options.add_argument('--headless')           # enable for headless runs
        options.add_argument("--disable-blink-features=AutomationControlled")
        driver = webdriver.Chrome(options=options, service=service)
        try:
            self.get_page(i, driver)
        finally:
            # Always release the browser process, even when scraping fails
            # (the original leaked one Chrome instance per thread).
            driver.quit()

    def run_thread(self, i):
        """Thread entry point: run one browser worker under the semaphore."""
        with sem:  # limits how many workers drive a browser at the same time
            print(threading.current_thread().name)
            print('-' * 20)
            self.browser_work(i)
            print('*' * 20)

    def main(self):
        """Fan out one thread per ranking URL and block until all finish.

        NOTE(review): persisting the scraped rows to sqlite is not
        implemented yet — results are only printed by get_page().
        """
        threads = []
        for i in range(3):
            t = threading.Thread(target=self.run_thread, args=(i,))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()



def get_info(details_list, key, default=None):
    """Return the first entry of *details_list* that contains *key*.

    Falls back to *default* (None unless given) when no entry matches,
    making the original implicit-None return explicit and letting callers
    choose a safer fallback (e.g. '' before chaining .replace()).
    """
    for details in details_list:
        if key in details:
            return details
    return default

# Launch the spider only when run as a script, not when imported as a module.
if __name__ == "__main__":
    jdSpider = JdSpider()
    jdSpider.main()