import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
import  time
from lxml import etree
import csv
class Ssr1(object):
    """Scraper for https://ssr1.scrape.center/ movie listings.

    Collects per-movie detail data (cast name/photo pairs, movie stills)
    and saves it to CSV / TXT files in the current directory.
    """

    def __init__(self):
        # No per-instance state; each method creates whatever driver it needs.
        pass

    def sele_1(self):
        """Selenium-driven flow: open the index page and click into a card.

        NOTE(review): the loop calls exit() on its first iteration, so only
        the first card is ever processed — preserved as-is; confirm intent.
        """
        # Keep the browser window open after the script exits.
        options = webdriver.ChromeOptions()
        options.add_experimental_option('detach', True)

        # The options must actually be passed to Chrome(), otherwise the
        # 'detach' setting above has no effect (original bug: it was dropped).
        driver = webdriver.Chrome(options=options)
        # Open the index page
        driver.get("https://scrape.center/")
        # Implicit wait so elements have time to appear
        driver.implicitly_wait(5)
        # Locate all movie cards on the page
        divs = driver.find_elements(
            By.XPATH,
            '//div[contains(@class,"el-col el-col-24 el-col-xs-24 el-col-sm-12 el-col-md-6")]')
        for div in divs:
            # Selenium 4 removed find_element_by_xpath(); the supported API
            # is find_element(By.XPATH, ...) (original bug: AttributeError).
            link = div.find_element(By.XPATH, './a')
            print(link.text)
            # Click via JS to avoid element-interception issues
            driver.execute_script("arguments[0].click();", link)
            # Window handles: [original tab, newly opened tab]
            handles = driver.window_handles
            print(handles)
            # Switch to the newly opened tab
            driver.switch_to.window(handles[1])
            driver.maximize_window()
            # time.sleep(5)
            # Terminates the whole program after the first card (debug
            # behavior kept from the original).
            exit()

    def reques_1(self, url):
        """Requests-driven flow: fetch the index, then scrape each detail page.

        :param url: index page URL, e.g. "https://ssr1.scrape.center/"
        """
        header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
        }
        # NOTE(review): verify=False disables TLS certificate verification —
        # acceptable only for this practice site; do not copy to production.
        response = requests.get(url, headers=header, verify=False)
        # Parse the listing page and pick out each movie card
        e_html = etree.HTML(response.text)
        cards = e_html.xpath('//div[@class="el-card item m-t is-hover-shadow"]')
        for card in cards:
            detail_href = card.xpath('.//div[@class="el-col el-col-24 el-col-xs-8 el-col-sm-6 el-col-md-4"]/a/@href')[0]
            # The detail page is JS-rendered, so load it through a real browser
            driver = webdriver.Chrome()
            try:
                driver.get('https://ssr1.scrape.center' + detail_href)
                driver.implicitly_wait(5)
                html_1 = etree.HTML(driver.page_source)
            finally:
                # Original code leaked one Chrome instance per card —
                # always shut the driver down once the page source is captured.
                driver.quit()
            el_rows = html_1.xpath('//div[@id="detail"]//div[@class="el-row"]')
            # Middle rows: cast sections (skip the header row and the final
            # stills row)
            for el_row in el_rows[1:len(el_rows) - 1]:
                tex = el_row.xpath('.//h2/text()')[0]
                img = el_row.xpath('.//img[@class="image"]/@src')
                name = el_row.xpath('.//div[@class="el-card__body"]/p[1]/text()')
                # Pair each person's name with their photo URL, save as CSV
                self.Csv_save(tex, list(zip(name, img)))

            # Last row: movie stills section title
            tex = el_rows[len(el_rows) - 1].xpath('.//h2/text()')
            # Still image URLs
            img = el_rows[len(el_rows) - 1].xpath('.//img[@class="el-image__inner el-image__preview"]/@src')
            print(tex)
            print(img)
            # Distinct loop variable: the original reused `i`, shadowing the
            # outer card loop's variable.
            for still in img:
                self.Txt_1(tex, still)

    # Save rows in CSV form
    def Csv_save(self, tex, a):
        """Write the row tuples in `a` to ./<tex>.csv.

        :param tex: section title used as the file name (str).
        :param a: iterable of rows, each an iterable of cell values.

        newline='' is required by the csv module; without it the writer
        emits an extra blank line between rows on Windows (original bug).
        """
        with open("./{}.csv".format(tex), "w", encoding="utf-8", newline="") as f:
            csv.writer(f).writerows(a)

    # Save one line in TXT form
    def Txt_1(self, tex, a):
        """Append line `a` to ./<tex[0]>.txt.

        :param tex: sequence whose first element is the file name.
        :param a: the line to append (str, newline added here).
        """
        with open("./{}.txt".format(tex[0]), "a", encoding="utf-8") as f:
            f.write(a + '\n')


def main(ssrl):
    """Run the requests-based scraping flow against the SSR1 index page.

    :param ssrl: an Ssr1 instance.

    Original bug: the parameter was ignored and the module-level global
    `ssr1` was used instead, so `main` crashed with NameError when called
    from anywhere but this script's __main__ block. Use the parameter.
    """
    # ssrl.sele_1()  # alternative: the Selenium-driven flow
    ssrl.reques_1("https://ssr1.scrape.center/")


if __name__ == '__main__':
    # Script entry point: build the scraper and hand it to main(). The
    # instance is deliberately bound to a module-level name here.
    ssr1=Ssr1()
    main(ssr1)
