#!/usr/bin/python3
# author:szw cl
# 2023年06月29日
# Email:1259577135@qq.com
# desc:
import json
import re
import time

import requests
from bs4 import BeautifulSoup
from retrying import retry

# Module-wide proxy map passed to every requests call below.
# NOTE(review): assumes a local proxy client is listening on 127.0.0.1:15732
# (e.g. a tunnel/VPN forwarder) — confirm the port before running.
proxies = {'http': '127.0.0.1:15732',
           'https': '127.0.0.1:15732'}


@retry(stop_max_attempt_number=3)
def get_list(kw, bookmarks):
    """Fetch one page of Pinterest search results for keyword *kw*.

    Parameters
    ----------
    kw : str
        Search keyword (may contain non-ASCII or special characters).
    bookmarks : str
        Pagination cursor returned by a previous call; '' for the first page.

    Returns
    -------
    dict
        Decoded JSON body of the BaseSearchResource response.

    Raises
    ------
    requests.RequestException
        Re-raised after sleeping 5s so that ``@retry`` re-runs the call
        (at most 3 attempts in total).
    """
    print("=")
    headers = {
        "authority": "www.pinterest.com",
        "accept": "application/json, text/javascript, */*, q=0.01",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "content-type": "application/x-www-form-urlencoded",
        "origin": "https://www.pinterest.com",
        "referer": "https://www.pinterest.com/",
        "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Microsoft Edge\";v=\"114\"",
        "sec-ch-ua-full-version-list": "\"Not.A/Brand\";v=\"8.0.0.0\", \"Chromium\";v=\"114.0.5735.110\", \"Microsoft Edge\";v=\"114.0.1823.41\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-model": "\"\"",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-ch-ua-platform-version": "\"5.15.0\"",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.41",
        "x-app-version": "5106292",
        "x-csrftoken": "8197c45d570ad6c576ee046df06cc625",
        "x-pinterest-appstate": "background",
        "x-pinterest-experimenthash": "80eb1963e70534944370cd0841537976b1c669d5ded5de41c63d109cccb95662cc4e71b0fb1785d87eae1e11c05092a23423f560b37cc8d5d3d8cd0117226036",
        "x-pinterest-pws-handler": "www/search/[scope].js",
        "x-requested-with": "XMLHttpRequest"
    }
    cookies = {
        "csrftoken": "8197c45d570ad6c576ee046df06cc625",
        "_pinterest_sess": "TWc9PSY2Qk1MczVjeldTTFBlOVp6NHVDOEd2cC9USmJRYThLQm9xbm5GV2Z3SlNldkI5clk4c09NTE4wNHlDVmxHL1RxMmxLNUgrYmVYUmJlUzFOeGV6UVY1eFdXc0diN0J6alJMOENnNkxHRWJZbz0maG5BUkVmTnhYZHFkVmk4VGpRUmtXeVl2dFNNPQ==",
        "_auth": "0",
        "_routing_id": "\"ea2e6ce7-e393-4e31-80b7-181e1f0d7584\"",
        "sessionFunnelEventLogged": "1",
        "hasBeenShownLoginModalInOpen": "true"
    }
    url = "https://www.pinterest.com/resource/BaseSearchResource/get/"
    # Build the payload with json.dumps so quotes/backslashes in *kw* or
    # *bookmarks* are escaped correctly — the previous string concatenation
    # produced invalid JSON for such keywords.
    options = {
        "options": {
            "article": "",
            "appliedProductFilters": "---",
            "price_max": None,
            "price_min": None,
            "query": str(kw),
            "scope": "pins",
            "auto_correction_disabled": "",
            "top_pin_id": "",
            "filters": "",
            "bookmarks": [str(bookmarks)],
        },
        "context": {},
    }
    data = {"data": json.dumps(options, ensure_ascii=False)}
    try:
        response = requests.post(url, headers=headers, proxies=proxies,
                                 cookies=cookies, data=data, timeout=20)
        return response.json()
    except requests.RequestException:
        print(kw, '重试中………………休息5秒')
        time.sleep(5)
        # Re-raise so @retry performs the bounded re-attempts; the previous
        # self-recursion here was unbounded and bypassed the decorator.
        raise


@retry(stop_max_attempt_number=4)
def get_content_detail(id):
    """Download the HTML of a single pin page.

    Parameters
    ----------
    id : str | int
        The pin id, interpolated into ``https://www.pinterest.com/pin/{id}/``.

    Returns
    -------
    str | None
        Page HTML, or None after 5 failed in-function attempts
        (5s pause between attempts).
    """
    headers = {
        "authority": "www.pinterest.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "cache-control": "max-age=0",
        "referer": "https://www.pinterest.com/",
        "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Microsoft Edge\";v=\"114\"",
        "sec-ch-ua-full-version-list": "\"Not.A/Brand\";v=\"8.0.0.0\", \"Chromium\";v=\"114.0.5735.110\", \"Microsoft Edge\";v=\"114.0.1823.41\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-model": "\"\"",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-ch-ua-platform-version": "\"5.15.0\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "same-origin",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.41"
    }
    cookies = {
        "csrftoken": "8197c45d570ad6c576ee046df06cc625",
        "_pinterest_sess": "TWc9PSY2Qk1MczVjeldTTFBlOVp6NHVDOEd2cC9USmJRYThLQm9xbm5GV2Z3SlNldkI5clk4c09NTE4wNHlDVmxHL1RxMmxLNUgrYmVYUmJlUzFOeGV6UVY1eFdXc0diN0J6alJMOENnNkxHRWJZbz0maG5BUkVmTnhYZHFkVmk4VGpRUmtXeVl2dFNNPQ==",
        "_auth": "0",
        "_routing_id": "\"ea2e6ce7-e393-4e31-80b7-181e1f0d7584\"",
        "sessionFunnelEventLogged": "1",
        "hasBeenShownLoginModalInOpen": "true"
    }
    url = f"https://www.pinterest.com/pin/{id}/"
    params = {
        "mt": "login"
    }
    for attempt in range(5):
        try:
            response = requests.get(url, timeout=20, headers=headers,
                                    proxies=proxies, cookies=cookies,
                                    params=params)
            return response.text
        except requests.RequestException:
            # Only network/HTTP errors trigger a retry; the previous bare
            # except also swallowed KeyboardInterrupt and programming errors.
            time.sleep(5)
    print(id, 'data not get ')
    return None



def parse_content_detail(content):
    """Extract image URL, title, description and tags from a pin page.

    Parameters
    ----------
    content : str
        Raw HTML of a ``https://www.pinterest.com/pin/<id>/`` page.

    Returns
    -------
    list | None
        ``[src, title, description, tag_list]`` when the image container
        div and its ``src`` attribute are found; otherwise None.
        Missing title/description/tags degrade to ``''`` / ``[]``.
    """
    src = ''
    description = ''
    tag_list = []
    title = ''
    try:
        img_div = re.search(r'<div class="XiG zI7 iyn Hsu".*?</div>', content)
        if img_div:
            # Capture group instead of str.strip('src="'): strip() removes
            # *characters* (any of s, r, c, =, ") from both ends, which can
            # eat the edges of a URL.
            src_match = re.search(r'src="(.*?)"', img_div.group())
            if src_match:
                src = src_match.group(1)

                title_div = re.search(r'<div data-test-id="pinTitle" .*?</div>', content)
                if title_div:
                    # Text between the last '>' and the closing '</h1'.
                    title_match = re.search(r'>([^>]*)</h1', title_div.group())
                    if title_match:
                        title = title_match.group(1)

                desc_div = re.search(r'<div data-test-id="truncated-description" .*?</div>', content)
                if desc_div:
                    # Capture the span body; the old strip('<span>') chewed
                    # leading/trailing letters in {s, p, a, n} off the text
                    # (e.g. '...pasta</span>' lost its trailing 'a').
                    desc_match = re.search(r'<span>(.*?)</span>', desc_div.group())
                    if desc_match:
                        description = desc_match.group(1)

                tags_div = re.search(r'<div class="XbT lnZ wsz zI7 iyn Hsu" .*?</div></div>', content)
                if tags_div:
                    try:
                        soup = BeautifulSoup(tags_div.group(), 'lxml')
                        for span in soup.find_all('span'):
                            tag_list.append(span.get_text())
                    except Exception:
                        # Tags are optional: a malformed snippet or missing
                        # lxml parser degrades to an empty list.
                        pass
                return [src, title, description, tag_list]
    except Exception as e:
        print(e)
    return None


def parse_list(list_content):
    """Split a BaseSearchResource JSON payload into (results, bookmark).

    Parameters
    ----------
    list_content : dict
        Decoded JSON returned by :func:`get_list`.

    Returns
    -------
    tuple
        ``(results, bookmarks)`` — the list of pin dicts (``[]`` if absent)
        and the pagination cursor for the next page (``''`` if absent).
    """
    bookmarks = ''
    results = []
    resource = list_content.get("resource_response", None)
    if resource:
        bookmarks = resource.get("bookmark", None)
        print('bookmarks：', bookmarks)
        # Reuse *resource* instead of re-fetching "resource_response" and
        # chaining .get() on it — the old code raised AttributeError
        # (None.get) whenever "resource_response" was missing.
        data = resource.get("data", None)
        if data:
            results = data.get("results", None)
    return results, bookmarks


if __name__ == '__main__':
    # Smoke test: download one pin page and print the parsed fields.
    # js = get_list("汽车","")
    # print(js)
    pin_id = '639229740873948294'  # renamed from `id` to avoid shadowing the builtin

    content = get_content_detail(pin_id)
    if content is None:
        # get_content_detail gives up after its internal retries; the old
        # inline code crashed with TypeError when searching None.
        print(pin_id, 'data not get ')
    else:
        # Delegate to parse_content_detail instead of duplicating its
        # regex/BeautifulSoup logic inline.
        parsed = parse_content_detail(content)
        if parsed:
            src, title, description, tag_list = parsed
            print(src)
            print('title:', title)
            print('description:', description)
            print('tag_list', tag_list)