import pymysql
import requests
from time import sleep
from lxml import etree
from datetime import datetime
import re
import urllib3
urllib3.disable_warnings()
class tider:
    """Crawler that scrapes article listing pages from chaihezi.com and
    stores one row per article into the MySQL table ``co_art_product``.

    Usage: ``tider().run()`` crawls pages 1..526 of ``self.url2``.
    """

    def __init__(self):
        # NOTE(review): database credentials are hard-coded; move them to
        # environment variables or a config file before sharing this script.
        self.host = "192.168.0.113"
        self.port = 3306
        self.user = 'coname'
        self.password = 'xZS6p4LDcKCwn4Yb'
        self.db = "co"
        self.charset = "utf8"
        self.num = 1   # current listing-page number (incremented up to 526)
        self.url = 'https://www.chaihezi.com/node/category/photo/page/'
        self.url2 = 'https://www.chaihezi.com/page/'
        self.header = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}
        self.nu = 1    # running counter mixed into the generated keywordid

    def open_mysql(self):
        """Open a database connection.

        Returns ``(conn, cursor)`` on success, ``None`` on failure
        (the original implementation returned ``None`` implicitly, which
        made the caller crash when unpacking — callers must check).
        """
        try:
            conn = pymysql.connect(host=self.host, port=self.port,
                                   user=self.user, passwd=self.password,
                                   db=self.db, charset=self.charset)
            cursor = conn.cursor()
            print("连接数据库成功!")
            return conn, cursor
        except Exception as e:
            print("连接数据库失败>>>", e)
            return None

    def close_mysql(self, conn, cursor):
        """Close the cursor and connection, logging success or failure."""
        try:
            cursor.close()
            conn.close()
            print("断开数据库成功!")
        except Exception as e:
            print("断开数据库失败>>>", e)

    def input_mysql(self, datas):
        """Insert one scraped row if its fileurl is not already stored.

        ``datas`` layout: [keywordid, srname, srtype, srtitle, fileurl, srimg].
        """
        opened = self.open_mysql()
        if opened is None:
            # Bug fix: previously ``conn, cursor = self.open_mysql()`` raised
            # TypeError when the connection failed; now we skip this row.
            return
        conn, cursor = opened
        try:
            # Bug fix: the duplicate check used datas[5] (srimg) but compared
            # it against the fileurl column; the correct key is datas[4].
            # Also switched to parameterized queries — the original built SQL
            # with str.format, which is vulnerable to SQL injection and breaks
            # on values containing quotes.
            if not cursor.execute(
                    'select fileurl from co_art_product where fileurl = %s',
                    (datas[4],)):
                cursor.execute(
                    'insert into co_art_product '
                    '(keywordid,srname,srtype,srtitle,fileurl,srimg) '
                    'values(%s,%s,%s,%s,%s,%s)',
                    tuple(datas[:6]))
                conn.commit()
                print("插入数据成功!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!！")
        except Exception as e:
            print("插入数据失败$$$$$$$$$$$$$$$$$$$$$$$$$$$$", e)
        self.close_mysql(conn, cursor)

    def spider_title(self):
        """Crawl listing pages from ``self.num`` through 526.

        For each ``<article>`` on a page, extract name/title/link/image and
        hand the row to :meth:`input_mysql`.  Rewritten from ~526-deep
        self-recursion to a plain loop, and the page is now fetched once
        instead of twice per iteration.
        """
        while self.num < 527:
            # Single request per page (the original issued the same GET twice,
            # once for etree and once for a regex that was never used).
            content = requests.get(self.url2 + str(self.num) + "/",
                                   headers=self.header, verify=False).text
            page = etree.HTML(content)
            post_rows = page.xpath('//*[@id="page"]/div/div/div/section/div[2]/div/div')
            for post_row in post_rows:
                for article in post_row.xpath('.//article'):
                    self.nu = self.nu + 1
                    try:
                        # keywordid = yymmddHHMMSS timestamp + counter
                        # (presumably intended to be unique — TODO confirm
                        # collisions are acceptable for this table).
                        tamp = "{0:%y%m%d%H%M%S}".format(datetime.now())
                        datas = [
                            int(tamp) + self.nu,                            # keywordid
                            article.xpath('.//div/div[1]/a//@title')[0],    # srname
                            'tide',                                         # srtype
                            article.xpath('.//div/div[3]/p/text()')[0],     # srtitle
                            article.xpath('.//div/div[1]/a//@href')[0],     # fileurl
                            article.xpath('.//div/div[1]/a//@src')[1],      # srimg
                        ]
                    except IndexError:
                        # Robustness: skip articles whose markup lacks one of
                        # the expected nodes instead of aborting the crawl.
                        continue
                    self.input_mysql(datas)
            self.num = self.num + 1

    def run(self):
        """Entry point: crawl every listing page."""
        self.spider_title()

if __name__ == "__main__":
    # Script entry point: build the crawler and start it.
    spider = tider()
    spider.run()