import requests
from lxml import etree
from lxml import html
from com.cjc import Post
from com.cjc import conn_mysql
from selenium import webdriver
import time
import re


def _first_text(nodes, default=""):
    """Return the first entry of an lxml ``xpath()`` result list.

    The original code stringified the whole result list and split on single
    quotes (``str(nodes).split("'")[1]``), which corrupted any value
    containing an apostrophe and left repr escape sequences (e.g. a literal
    backslash-n) inside the extracted text. Indexing the list directly
    avoids both problems.

    :param nodes: list returned by an lxml ``xpath()`` call
    :param default: value returned when the list is empty
    """
    return nodes[0] if nodes else default


def _to_int(value, default=0):
    """Convert *value* to ``int``; return *default* on empty or bad input.

    The original called ``int("")`` when a vote counter was missing from the
    page, which raised ``ValueError`` and killed the crawling thread.
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        return default


def main_spider(low, high, thread_name):
    """Crawl smzdm.com category listings and persist qualifying deal posts.

    For every category code returned by ``conn_mysql.find_dict_item2_code()``
    between indexes ``low`` (inclusive) and ``high`` (exclusive), walks the
    paginated listing until an empty page is reached, scrapes each article's
    detail page, and batches posts that have a non-empty price and a
    worthy/unworthy vote ratio >= 3 into MySQL via ``conn_mysql.save``.

    :param low: first index into the category-code list (inclusive)
    :param high: last index into the category-code list (exclusive)
    :param thread_name: label prefixed to progress output for this worker
    """
    # Total posts accepted so far, across all categories.
    count = 0
    # Per-page batch buffer, flushed to MySQL after each listing page.
    posts = []
    # Request headers are loop-invariant, so build them once up front.
    header = {
        "user-agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
        "cookie": '__ckguid=Gth5WW8lVkYr8BWiW57kkg6; device_id=2130706433158389929829043560f622d29688a60c72cd2335e4a31769; homepage_sug=b; r_sort_type=score; __jsluid_s=ab548f8e769e802e54690973bcd4cf1e; _ga=GA1.2.139432992.1583899303; shequ_pc_sug=a; sess=MTQwNjF8MTU4ODkwNTEzNXw5OTg1Mjg0OTg2fDY0ODZhZTVjMDAwMDgwZTkxMzYyN2UyOTUzZTNiN2Zl; user=user%3A9985284986%7C9985284986; smzdm_user_source=1D0721FCBDF631797C4863B9ACB70D3E; smzdm_id=9985284986; userId=user:9985284986|9985284986; PHPSESSID=6a3dcdeec8e6ad0d0001a8c0b63b1709; Hm_lvt_9b7ac3d38f30fe89ff0b8a0546904e58=1584951657,1585016686,1585017140,1585121691; _zdmA.uid=ZDMA.FlxkuWQBS.1585121692.2419200; zdm_qd=%7B%22referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DZS7pilFWR3HIb6msGjur1Qj7HYAQzNrS8D3L6MIxI7u%26wd%3D%26eqid%3Dde72e9f60001702c000000035e7b0997%22%7D; _gid=GA1.2.380098399.1585121693; ad_date=25; bannerCounter=%5B%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%2C%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%2C%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%2C%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%2C%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%2C%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%5D; ad_json_feed=%7B%22J_feed_ad4%22%3A%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%7D; wt3_eid=%3B999768690672041%7C2158389975500234536%232158512484900723973; wt3_sid=%3B999768690672041; ad_load_feed4=%7B%22J_feed_ad4%22%3A%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%7D; _zdmA.time=1585128780869.14459.https%3A%2F%2Fwww.smzdm.com%2F; _gat_UA-27058866-1=1; Hm_lpvt_9b7ac3d38f30fe89ff0b8a0546904e58=1585128881'}
    str_list = conn_mysql.find_dict_item2_code()
    for l in range(low, high):
        print(thread_name, l, str(str_list[l]) + "**********************************")
        count_page = 0
        while True:
            count_page += 1
            url = ("https://www.smzdm.com/fenlei/" + str(str_list[l])
                   + "/h5c4s0f0t0p" + str(count_page) + "/#feed-main")
            print(thread_name, "…………正在下载页面：", url)
            # Fetch and parse the listing page.
            response = requests.get(url, headers=header)
            response_html = etree.HTML(response.content.decode())
            li_arr = response_html.xpath("//li[@class='feed-row-wide']")
            if not li_arr:
                # An empty listing page marks the end of this category.
                print("该页面为空")
                break
            for li in li_arr:
                href = _first_text(li.xpath("./div/div[2]/h5/a/@href"))
                print(thread_name, href)
                # Fetch and parse the article detail page.
                response2 = requests.get(href, headers=header)
                detail = etree.HTML(response2.content.decode())
                title = _first_text(detail.xpath('//*[@id="feed-main"]/div[2]/div/div[1]/h1/text()')).strip()
                # Collapse runs of 3+ whitespace characters, as before.
                title = re.sub(r'\s{3,}', '', title)
                price = _first_text(detail.xpath('//*[@id="feed-main"]/div[2]/div/div[1]/div[1]/span/text()'))
                class_one = _first_text(detail.xpath('//*[@id="feed-wrap"]/div/a[2]/span/text()'))
                class_two = _first_text(detail.xpath('//*[@id="feed-wrap"]/div/a[3]/span/text()'))
                class_three = _first_text(detail.xpath('//*[@id="feed-wrap"]/div/a[4]/span/text()'))
                picture_url = _first_text(detail.xpath('//*[@id="feed-main"]/div[2]/a/img/@src'))
                author = _first_text(detail.xpath('//*[@id="feed-main"]/div[2]/div/div[2]/div[1]/span[2]/text()'))
                # Missing counters become 0 instead of crashing on int("").
                worthy_num = _to_int(_first_text(detail.xpath('//*[@id="rating_worthy_num"]/text()')))
                unworthy_num = _to_int(_first_text(detail.xpath('//*[@id="rating_unworthy_num"]/text()')))
                collect_num = _first_text(detail.xpath('//*[@id="content"]/div/div[1]/div[1]/span/text()'))
                buy_urls = detail.xpath('//a[@class="go-buy btn"]/@href')
                if not buy_urls:
                    # NOTE(review): the original aborted the rest of the page
                    # when one item lacked a buy link (bare except + break);
                    # that behaviour is preserved here — confirm whether a
                    # per-item skip (continue) was intended instead.
                    break
                buy_url = buy_urls[0]
                content_nodes = detail.xpath('//*[@id="feed-main"]/div[3]/article')
                if not content_nodes:
                    # Detail page without an article body — skip this item
                    # instead of crashing on an IndexError.
                    continue
                content = html.tostring(content_nodes[0])
                post = Post(title, price, class_one, class_two, picture_url,
                            author, str(worthy_num), str(unworthy_num),
                            collect_num, buy_url, content, class_three)
                # Treat a zero counter as one vote to avoid division by zero
                # (the Post above keeps the original, unadjusted values).
                worthy = worthy_num or 1
                unworthy = unworthy_num or 1
                if price != "" and worthy / unworthy >= 3:
                    posts.append(post)
                    count += 1
                    print(thread_name, l, "第" + str(count_page) + "页____________第" + str(count) + "数据", title, price)
                print(thread_name, l, "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^count_page:", count_page)
            # Flush this page's batch to MySQL.
            conn_mysql.save(posts)
            posts.clear()
