import re
import time

import requests
from pymysql import connect


class spider():
    """Scraper for dytt89.com.

    Collects (title, detail-page link, download link) triples from the
    "2023必看热片" list on the landing page and stores them in a MySQL
    table ``movieben`` (database ``movietext``).
    """

    def __init__(self):
        # Landing page of the movie site.
        self.url = "https://www.dytt89.com/"
        self.headers = {
            'user-agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8) Gecko/20051212 Firefox/1.5'
        }
        # Last detail-page URL fetched by get_indexlink().
        self.indexurl = ""
        # Open the MySQL connection.
        # NOTE(review): credentials are hard-coded; consider moving them to
        # environment variables or a config file.
        self.connect = connect(
            user='root',
            password='123456',
            host='127.0.0.1',
            port=3306,
            database='movietext',
            charset='utf8'
        )
        # Cursor reused by save_data() for inserts.
        self.cs = self.connect.cursor()
        try:
            create_sqli = "create table movieben (id int, title varchar(255),link varchar(255),indexlink longtext);"
            self.cs.execute(create_sqli)
        except Exception as e:
            # Most likely the table already exists; report and keep going.
            print("创建数据表失败:", e)
        else:
            print("创建数据表成功;")

    def get_data(self):
        """Fetch the landing page and hand its HTML to parse_data()."""
        response = requests.get(url=self.url, headers=self.headers)
        # The site serves gb2312-encoded pages; set the encoding explicitly
        # to avoid mojibake when reading response.text.
        response.encoding = 'gb2312'
        # BUG FIX: was `s.parse_data(...)` — it relied on the module-level
        # global `s` created in the __main__ guard, so the method broke for
        # any other instance. Dispatch through self instead.
        self.parse_data(response.text)

    def parse_data(self, data_demo):
        """Extract (detail-link, title) pairs from the list-page HTML and
        process each movie entry via get_indexlink().

        data_demo -- decoded HTML of the landing page
        """
        # Narrow down to the <ul> block under the "2023必看热片" heading.
        # Raw strings so the regex escapes (\s) are not Python escapes.
        datas = re.findall(r'div\sclass="co_area2".*?2023必看热片.*?<ul>(.*?)</ul>', data_demo, re.S)
        if not datas:
            # Page layout changed or fetch failed — nothing to parse.
            return
        data = re.findall(r"<li><a\shref='(.*?)'\stitle=.*?>(.*?)<.*?</li>", datas[0], re.S)
        for href, title in data:
            link = 'https://www.dytt89.com' + str(href)
            print('电影链接:', link)
            # Be polite to the server between detail-page requests.
            time.sleep(3)
            self.get_indexlink(link, link, title)
            print('电影名称:', title)
            print('===============================')

    def get_indexlink(self, index_url, link, title):
        """Fetch a movie's detail page, extract the first download link and
        persist the record.

        index_url -- URL of the detail page to fetch
        link      -- detail-page URL stored alongside the record
        title     -- movie title
        """
        self.indexurl = index_url
        responses = requests.get(url=self.indexurl, headers=self.headers)
        responses.encoding = 'gb2312'
        index_urls = re.findall(r'<td\sstyle="WORD-WRAP:\sbreak-word"\sbgcolor="#fdfddf"><a\shref="(.*?)">.*?</a></td>', responses.text, re.S)
        if not index_urls:
            # No download link found on this detail page — skip the record
            # instead of crashing with IndexError.
            return
        index = index_urls[0]
        print('下载链接:', index)
        self.save_data(title, link, index)

    def save_data(self, title, link, index):
        """Insert one (title, link, download-link) row and commit.

        Uses a parameterized query, so quoting/injection in the scraped
        strings is handled by the driver.
        """
        sql = 'insert into movieben(title,link, indexlink) values(%s, %s, %s)'
        self.cs.execute(sql, [title, link, index])
        self.connect.commit()
if __name__ == '__main__':
    # Entry point: constructing the scraper opens the DB connection and
    # ensures the table exists; get_data() then crawls the movie list.
    s = spider()
    s.get_data()