#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :vk_auto_design_dev.py
# @Time      :2023/8/14
# @Author    :CL
# @email     :1037654919@qq.com
import time
from multiprocessing import Pool
from bs4 import BeautifulSoup
import json
import os
import requests
from utils import MongoDBUtil, get_html, mongo_manager
import random
from tqdm import tqdm
# Module-level Mongo handles shared by all workers.
MongoDBUtil = MongoDBUtil()  # NOTE(review): rebinds the imported MongoDBUtil class name to an instance
vk_post_id = mongo_manager("vk_post_id", db="car_images")  # seed collection: discovered post ids
vk = mongo_manager("vk", db="car_images")  # result collection: scraped post records


def get_proxy():
    """Return an http/https proxy mapping pointing at a random haproxy port.

    NOTE: this definition is shadowed by a second ``get_proxy`` defined
    later in this file, and neither is currently called.
    """
    endpoint = 'http://zheng123:zheng123@haproxy.iinti.cn:{}'.format(
        random.randint(24000, 24400))
    return {'http': endpoint, 'https': endpoint}

# Fixed local forward proxy used by every request helper below
# (save_image, get_wall_page, get_photo_page*).
# NOTE(review): get_proxy() is never called anywhere in this file — confirm
# whether the rotating haproxy endpoint was meant to be used instead.
proxies = {'http': '127.0.0.1:15732',
           'https': '127.0.0.1:15732'}



def parse_html(html):
    """Parse VK wall-post blocks out of *html* and print the first one.

    Fix: the original always read the cached ``html_txt.txt`` file and
    silently ignored its *html* argument.  Now the argument is used, and
    the cached file is only a fallback when *html* is falsy.
    """
    if not html:
        with open('html_txt.txt', 'r', encoding='utf-8') as f:
            html = f.read()

    soup = BeautifulSoup(html, 'lxml')
    posts = soup.find_all('div', class_='_post_content')
    for post in posts:
        wall_post_text = post.find('div', class_='wall_post_text')
        author_name = post.find('span', class_='PostHeaderTitle__authorName')
        imgs = post.find_all('img', loading=True)
        try:
            print(author_name)
            # .text raises AttributeError when the post has no text block.
            print(wall_post_text.text)
            print(imgs)
        except AttributeError:
            pass
        break  # only the first post is inspected (debug helper)


def get_page(owner_id='-42410379', name='auto_dsgn', wall_start_from=0):
    """POST to VK's ``al_wall.php`` AJAX endpoint and return the parsed JSON
    for one page (10 posts) of a community wall.

    owner_id: community id as a string (negative for groups).
    name: community short name — used only to build the Referer header.
    wall_start_from: pagination cursor; 0 fetches the first page.

    NOTE(review): the hard-coded cookies are a captured browser session and
    will expire — refresh them before relying on this.
    """
    # The endpoint's offset is 0-based while wall_start_from counts posts.
    if wall_start_from == 0:
        offset = 0
    else:
        offset = wall_start_from - 1
    headers = {
        "authority": "vk.com",
        "accept": "*/*",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "content-type": "application/x-www-form-urlencoded",
        "origin": "https://vk.com",
        "referer": f"https://vk.com/{name}",
        "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Microsoft Edge\";v=\"114\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58",
        "x-requested-with": "XMLHttpRequest"
    }
    cookies = {
        "remixstlid": "9094564608919029073_5sk2aA9XbrRVPy2QPjq7J0WdhSzxP9Jla8OYRhpaMdL",
        "remixstid": "1617929205_Lpb5vZ3XwIQzJXKzQfU2Mvx3lROOFQtuZka5AzKHLjw",
        "remixnp": "0",
        "remixscreen_width": "1920",
        "remixscreen_height": "1080",
        "remixscreen_dpr": "1",
        "remixscreen_depth": "24",
        "remixscreen_orient": "1",
        "remixdark_color_scheme": "0",
        "remixcolor_scheme_mode": "auto",
        "remixdt": "18000",
        "tmr_lvid": "3a4c284a606dbaebece943d37c5f9640",
        "tmr_lvidTS": "1691562613958",
        "remixua": "190%7C-1%7C194%7C3556042418",
        "remixuas": "YjM5NDFiMmI2YWQxMzE4YmFhNjM3N2Rh",
        "remixsuc": "1%3A",
        "remixlang": "0",
        "remixseenads": "1",
        "remixrefkey": "443e46dbdcb70b0612",
        "remixlgck": "ad14e1ab1716d6d632",
        "remixuacck": "494504fdec45aee787",
        "remixscreen_winzoom": "1",
        "remixgp": "8d298097080a91de983e85882b77a3db",
        "tmr_detect": "0%7C1692236512472"
    }
    url = "https://vk.com/al_wall.php"
    params = {
        "act": "get_wall"
    }
    data = {
        "act": "get_wall",
        "al": "1",
        "fixed": "15121",
        "offset": offset,
        "onlyCache": "false",
        "owner_id": owner_id,
        "type": "own",
        "wall_start_from": wall_start_from
    }
    # requests is imported at module level; the original re-imported it here.
    response = requests.post(url, headers=headers, cookies=cookies, params=params, data=data)
    print(response)
    return response.json()


def save_image(name, url, PWD):
    """Download *url* into the directory prefix *PWD* under filename *name*.

    Returns:
        2 if the file already exists (skipped),
        1 on successful download,
        0 on any failure.
    """
    print('save image:', url)
    filename = PWD + name
    # Skip files already downloaded by a previous run.
    if os.path.isfile(filename):
        print("文件存在：", filename)
        return 2
    try:
        response = requests.get(url, proxies=proxies, stream=True, timeout=30)
        # Fix: fail on HTTP errors instead of saving an error page as an image.
        response.raise_for_status()
        with open(filename, 'wb') as fd:
            fd.write(response.content)
        # (Removed `requests.session().close()` — it created a brand-new
        # session and immediately closed it, a no-op.)
        return 1
    except Exception as e:
        print('error6:保存图片失败', url, e)
        return 0


def save_summary(path, file, jsondata):
    """Serialize *jsondata* as JSON to ``path + file``.

    ensure_ascii=False keeps non-ASCII (e.g. Chinese) characters readable
    instead of escaping them; utf-8 is forced so that works on any platform.
    (The original called ``f.close()`` after the ``with`` block — redundant.)
    """
    with open(path + file, 'w', encoding='utf-8') as f:
        json.dump(jsondata, f, ensure_ascii=False)


def save_txt(txt_filename, data, PWD):
    """Write the label string *data* to ``PWD + txt_filename`` as UTF-8.

    (The original called ``f.close()`` after the ``with`` block — redundant —
    and relied on the platform default encoding.)
    """
    with open(PWD + txt_filename, 'w', encoding='utf-8') as f:
        f.write(data)
def get_wall_page(url):
    """GET a VK wall page through the local proxy with up to 5 retries.

    Returns the response body text, or None when all attempts fail.

    NOTE(review): the hard-coded session cookies will expire — refresh them
    before relying on this.
    """
    print('begin 访问：', url)
    headers = {
        "authority": "vk.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "cache-control": "max-age=0",
        "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Microsoft Edge\";v=\"114\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58"
    }
    cookies = {
        "remixstlid": "9094564608919029073_5sk2aA9XbrRVPy2QPjq7J0WdhSzxP9Jla8OYRhpaMdL",
        "remixstid": "1617929205_Lpb5vZ3XwIQzJXKzQfU2Mvx3lROOFQtuZka5AzKHLjw",
        "remixnp": "0",
        "remixscreen_width": "1920",
        "remixscreen_height": "1080",
        "remixscreen_dpr": "1",
        "remixscreen_depth": "24",
        "remixscreen_orient": "1",
        "remixdark_color_scheme": "0",
        "remixcolor_scheme_mode": "auto",
        "remixdt": "18000",
        "tmr_lvid": "3a4c284a606dbaebece943d37c5f9640",
        "tmr_lvidTS": "1691562613958",
        "remixua": "190%7C-1%7C194%7C3556042418",
        "remixuas": "YjM5NDFiMmI2YWQxMzE4YmFhNjM3N2Rh",
        "remixsuc": "1%3A",
        "remixlang": "0",
        "remixseenads": "1",
        "remixrefkey": "443e46dbdcb70b0612",
        "remixlgck": "ad14e1ab1716d6d632",
        "remixuacck": "494504fdec45aee787",
        "remixgp": "8d298097080a91de983e85882b77a3db",
        "remixscreen_winzoom": "2.41",
        "tmr_detect": "0%7C1692239159608"
    }
    for _attempt in range(5):
        try:
            response = requests.get(url, headers=headers, cookies=cookies, proxies=proxies, timeout=10)
            return response.text
        except Exception as e:
            print("Error:", e)
            time.sleep(5)
    # (Removed the dead `requests.session().close()` — it built a new session
    # only to close it, and was reached only after all retries failed.)
    print('error7:get_wall_page wrong,')
    return None

def get_photo_page(post_id='-42410379_12269'):
    """POST to VK's ``al_photos.php`` endpoint for all photos attached to a
    wall post, with up to 5 retries.

    post_id: ``<owner_id>_<post_number>``; the API's list id is ``wall`` + post_id.
    Returns the parsed JSON payload, or None when every attempt fails.

    NOTE(review): the hard-coded cookies/Authorization header are a captured
    session and will expire — refresh before use.
    """
    list_id = "wall{}".format(post_id)
    # requests is imported at module level; the original re-imported it here.
    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
        "Accept": "*/*",
        "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
        "Accept-Encoding": "gzip, deflate, br",
        "Content-Type": "application/x-www-form-urlencoded",
        "X-Requested-With": "XMLHttpRequest",
        "Origin": "https://vk.com",
        "Authorization": "Basic Xzp2azEuYS52TzFaVklZcmVhRWtESVBJaTBJVmVPZ29QTTlvNUhKOEtOdGZDck5QdHU0S1hEdW00Q0EzSHBIdk5XNDFyQWJ6eDljTkZ3OUlFQzA3cFVQMGVBSXpncjlTSzVnMXRxTXBmclFpOVhvWjZNelZnWFAwQzhKWHkxSmkzU1YzVnNMTjIxS3RSYThBRWlELTdscTkyUl9oa2dJSllkdnpGMHprWFlCbXk0V0pGOE5HRmFVdHNWOUJYeExBSjNQb1BHZ0o=",
        "Connection": "keep-alive",
        "Referer": "https://vk.com/wall-42410379_12269",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "Pragma": "no-cache",
        "Cache-Control": "no-cache",
        "TE": "trailers"
    }
    cookies = {
        "remixlang": "0",
        "remixstlid": "9096815899615316768_AS4yBmOZvtzjPelxZLfHP9ViZIsDX7NZvBz0SEwGtZP",
        "remixua": "189%7C-1%7C320%7C3885329045",
        "remixstid": "729063903_s6o9iPi4NjVsx99qgntP61lWe2IelZp2gC0fZFK3RtD",
        "remixrefkey": "7f161f89bec1c6caa1",
        "remixscreen_width": "1920",
        "remixscreen_height": "1080",
        "remixscreen_dpr": "1",
        "remixscreen_depth": "24",
        "remixscreen_winzoom": "2.26",
        "remixdark_color_scheme": "0",
        "remixcolor_scheme_mode": "auto",
        "remixdt": "18000",
        "tmr_lvid": "fc5798e4186ee953461c78ab50777cbd",
        "tmr_lvidTS": "1691053490045",
        "remixuas": "YThmYzk4MTIwNThjNTc3NjgzNmYwZGM0",
        "remixscreen_orient": "1",
        "remixsuc": "1%3A",
        "remixsid": "1_SMOopPVBmISLFG7FNAYT19K0HWiL-1GIcq_lBXSjKNQsAwJfA7rwK_6Uzi-5FypwRjtbBlCxn8X1m52996a-5Q",
        "remixnsid": "vk1.a.026I_p7HQi181ayvKaTYexIlshCjoOXBe_xX6usciLlZkzxdOSXzDQqK8jqgf_YKzvWvptwG2LzwPgeZDe666QS5qM_hGmh64jS5_gUXuqBjUW7Jq7MBI1vKJRoBd5TaTn44ifbtkvlJoMIlpKqZNQv1FK49-pzpxZ9c4EJdHvMc6nARpz8n9NTIyLPlyMRG",
        "remixdmgr": "722bb5b152f9c04a11ddc37daeb1c30b066c8f2facca90633b8ec0ce2b0ed4b1",
        "remixseenads": "1",
        "remixpuad": "IcVUv8BegYzeyJNVNfbKn2Blb0gJyOS8fV2xWc7gtdU",
        "remixuacck": "5540fd50370db9d45b",
        "remixgp": "8a4751a935273e3caf4845feabeb1801",
        "remixnp": "0",
        "tmr_detect": "0%7C1692585731665"
    }
    url = "https://vk.com/al_photos.php"
    params = {
        "act": "show"
    }
    data = {
        "act": "show",
        "al": "1",
        "al_ad": "0",
        "dmcah": "",
        "list": list_id,
        "module": "wall",
        # "photo": "-42410379_457321487"
    }
    for _attempt in range(5):
        try:
            response = requests.post(url, headers=headers, cookies=cookies, proxies=proxies, params=params, data=data)
            if response.status_code == 200:
                return response.json()
        # Fix: the original bare `except:` silently swallowed everything,
        # including KeyboardInterrupt — log the error instead.
        except Exception as e:
            print('get_photo_page retry:', e)
            time.sleep(10)
    print('error4:no get get_photo_page')
    return None
def get_photo_page2(album_id="album-42410379_274754123"):
    """POST to VK's ``al_photos.php`` endpoint for all photos in an album
    attached to a wall post, with up to 5 retries.

    album_id: the list id in ``album<owner>_<album>`` form.
    Returns the parsed JSON payload, or None when every attempt fails.

    NOTE(review): the hard-coded cookies/Authorization header are a captured
    session and will expire — refresh before use.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
        "Accept": "*/*",
        "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
        "Accept-Encoding": "gzip, deflate, br",
        "Content-Type": "application/x-www-form-urlencoded",
        "X-Requested-With": "XMLHttpRequest",
        "Origin": "https://vk.com",
        "Authorization": "Basic Xzp2azEuYS52TzFaVklZcmVhRWtESVBJaTBJVmVPZ29QTTlvNUhKOEtOdGZDck5QdHU0S1hEdW00Q0EzSHBIdk5XNDFyQWJ6eDljTkZ3OUlFQzA3cFVQMGVBSXpncjlTSzVnMXRxTXBmclFpOVhvWjZNelZnWFAwQzhKWHkxSmkzU1YzVnNMTjIxS3RSYThBRWlELTdscTkyUl9oa2dJSllkdnpGMHprWFlCbXk0V0pGOE5HRmFVdHNWOUJYeExBSjNQb1BHZ0o=",
        "Connection": "keep-alive",
        "Referer": "https://vk.com/wall-42410379_4621?z=album-42410379_274754123",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "Pragma": "no-cache",
        "Cache-Control": "no-cache",
        "TE": "trailers"
    }
    cookies = {
        "remixlang": "0",
        "remixstlid": "9096815899615316768_AS4yBmOZvtzjPelxZLfHP9ViZIsDX7NZvBz0SEwGtZP",
        "remixua": "189%7C-1%7C320%7C3885329045",
        "remixstid": "729063903_s6o9iPi4NjVsx99qgntP61lWe2IelZp2gC0fZFK3RtD",
        "remixrefkey": "7f161f89bec1c6caa1",
        "remixscreen_width": "1920",
        "remixscreen_height": "1080",
        "remixscreen_dpr": "1",
        "remixscreen_depth": "24",
        "remixscreen_winzoom": "2.26",
        "remixdark_color_scheme": "0",
        "remixcolor_scheme_mode": "auto",
        "remixdt": "18000",
        "tmr_lvid": "fc5798e4186ee953461c78ab50777cbd",
        "tmr_lvidTS": "1691053490045",
        "remixuas": "YThmYzk4MTIwNThjNTc3NjgzNmYwZGM0",
        "remixscreen_orient": "1",
        "remixsuc": "1%3A",
        "remixsid": "1_SMOopPVBmISLFG7FNAYT19K0HWiL-1GIcq_lBXSjKNQsAwJfA7rwK_6Uzi-5FypwRjtbBlCxn8X1m52996a-5Q",
        "remixnsid": "vk1.a.026I_p7HQi181ayvKaTYexIlshCjoOXBe_xX6usciLlZkzxdOSXzDQqK8jqgf_YKzvWvptwG2LzwPgeZDe666QS5qM_hGmh64jS5_gUXuqBjUW7Jq7MBI1vKJRoBd5TaTn44ifbtkvlJoMIlpKqZNQv1FK49-pzpxZ9c4EJdHvMc6nARpz8n9NTIyLPlyMRG",
        "remixdmgr": "722bb5b152f9c04a11ddc37daeb1c30b066c8f2facca90633b8ec0ce2b0ed4b1",
        "remixseenads": "1",
        "remixpuad": "IcVUv8BegYzeyJNVNfbKn2Blb0gJyOS8fV2xWc7gtdU",
        "remixuacck": "5540fd50370db9d45b",
        "remixgp": "8a4751a935273e3caf4845feabeb1801",
        "remixnp": "0",
        "tmr_detect": "0%7C1692610428836"
    }
    url = "https://vk.com/al_photos.php"
    params = {
        "act": "show"
    }
    data = {
        "act": "show",
        "al": "1",
        "al_ad": "0",
        "dmcah": "",
        "list": album_id,  # e.g. "album-42410379_274754123"
        "module": "wall",
        # "photo": "-42410379_457275120"
    }

    for _attempt in range(5):
        try:
            response = requests.post(url, headers=headers, cookies=cookies, proxies=proxies, params=params, data=data)
            if response.status_code == 200:
                return response.json()
        # Fix: the original bare `except:` silently swallowed everything,
        # including KeyboardInterrupt — log the error instead.
        except Exception as e:
            print('get_photo_page2 retry:', e)
            time.sleep(10)
    print('error4:no get get_photo_page')
    return None
def test_get_data(seed ={'_id':'-42410379_4621'}):
    """Debug helper: fetch one wall post and print its photo API payloads.

    NOTE(review): mutable default argument; `response2` is fetched but never
    printed, and the same payload is printed twice — looks like leftover
    debugging.
    """
    url = 'https://vk.com/wall' + seed['_id']
    # own_name = seed['name']
    res = get_wall_page(url)
    soups = BeautifulSoup(res, 'lxml')
    data = soups.find('div', class_='_post_content')


    response = get_photo_page(post_id=seed['_id'])
    # Album link inside the post's media grid, e.g. /album-42410379_274754123
    album_id = data.find('a', class_='MediaGrid__interactive')['href']
    print(album_id)
    response2 = get_photo_page2(album_id=str(album_id).split('/')[-1])
    print(response['payload'][1][3])
    print('asdasdasda')
    print(response['payload'][1][3])
def get_data(seed):
    """Crawl one VK wall post described by *seed*: download its images into
    ``<community>/<post_id>/``, write a label .txt per image plus a
    summary.json, and persist a record in the ``vk`` Mongo collection.

    seed: dict with keys ``_id`` (post id like ``-42410379_4621``), ``name``
    (community short name), ``owner_id`` and ``wall_start_from``.
    """
    url = 'https://vk.com/wall' + seed['_id']
    own_name =seed['name']
    res = get_wall_page(url)
    soups = BeautifulSoup(res, 'lxml')
    datas = soups.find_all('div', class_='_post_content')
    print('len(datas)',len(datas))
    for data in datas:
        # Fresh Mongo handle per post (this function runs in worker processes).
        vk_data = mongo_manager("vk", db="car_images")
        try:
            # post_id = data.find('h5', class_='PostHeaderTitle__author').find('a')['data-post-id']
            post_id =seed['_id']
            # # Skip posts already crawled (disabled)
            # result = vk.findOne({"_id": post_id})
            # if result is None:
            #     pass
            # else:
            #     continue
            # Per-post output directory: <community>/<post_id>/
            file = '{}/{}/'.format(own_name, post_id)
            if os.path.exists(file):
                names = os.listdir(file)
                # A summary.json plus at least one other file means this post is done.
                if len(names) > 1 and 'summary.json' in names:
                    print(file, '文件夹已经存在,文件已经爬取')
                    seed['status'] = 'success'

                    vk_data.updateOne({'_id': seed['_id']}, seed)
                    vk_data.close()
                    continue
            else:
                os.mkdir(file)  # NOTE(review): can race between worker processes — confirm
            # name_href = data.find('h5', class_='PostHeaderTitle__author').find('a')['href']
            # Title is the text before the first '#'; hashtags become the tags.
            try:
                wall_post_text = data.find('div', class_='wall_post_text')
                infos = wall_post_text.get_text()
                title = infos.split('#')[0]
                tags = infos.split('#')[1:-1]
            except:
                title = '无标题'
                tags = []
            try:
                PostHeaderTitle__authorName = data.find('span', class_='PostHeaderTitle__authorName').get_text()
            except:
                PostHeaderTitle__authorName = ''

            # Extract the downloadable attachment (zip) link, if present
            try:
                zip_href = \
                    data.find('a',
                              class_='SecondaryAttachment js-SecondaryAttachment SecondaryAttachment--interactive')[
                        'href']
                zip_href = 'https://vk.com' + zip_href
                # print(zip_href)
            except:
                zip_href = ''
            images = []
            image_hrefs = []

            # Fetch the post's images (core part)
            try:
                # # Method 1 (disabled): open each photo's popup page
                # thumbs=data.find('div', class_='MediaGrid MediaGrid--twoRow').find_all('div',style =True)
                # # print(len(thumbs))
                # for  thumb in thumbs:
                #     # get the large-image link; fails because the image lives in an in-page popup
                #     photo_id = thumb.find('div')['data-photo-id']
                #     photo_url = 'https://vk.com/wall{0}?z=photo{1}%2Fwall{0}'.format(post_id,photo_id)
                #     response = get_wall_page(photo_url)
                #     soup = BeautifulSoup(response, 'lxml')
                #     print(soup)
                #     img = soup.find('div', id = 'pv_photo').find('img')
                #     href = str(img['src']).replace(';', '&')

                # # Method 2 (disabled): scrape <img> tags directly
                # imgs = data.find_all('img', loading=True)
                # # print(imgs)
                # for img in imgs:
                #     # only yields the small thumbnail link
                #     href = str(img['src']).replace(';', '&')
                #     # print(href)

                # # Method 3 (active): al_photos.php API returns full-size urls
                response = get_photo_page(post_id=post_id)
                # NOTE(review): duplicated assignment; also rebinds the outer
                # `datas` loop variable (safe: the original list keeps iterating).
                datas  = datas = response['payload'][1][3]
                # album_id  = data.find('a', class_='MediaGrid__interactive')['href']
                # response2 = get_photo_page2(album_id=str(album_id).split('/')[-1])
                # datas2 = response2['payload'][1][3]



                if own_name == 'auto_dsgn':   # for auto_dsgn posts, drop the last two images
                    if len(datas) >= 3:
                        datas.pop()
                        datas.pop()
                for data2 in datas:
                    # print(data['w_'][0])
                    # 'w_' holds the largest available image url — TODO confirm against API
                    href = data2['w_'][0]
                    print(href)
                    image_hrefs.append(href)
                    name = href.split('/')[-1].split('?')[0]
                    # Download the image
                    result = save_image(name=name, url=href, PWD=file)
                    # Write the label .txt only for freshly downloaded images
                    if result == 1:
                        txt_filename = name.split('.')[0] + '.txt'
                        description = ''
                        txt_data = str(title) + ',' + str(description)
                        for tag in tags:
                            txt_data += ',' + str(tag)
                        save_txt(txt_filename=txt_filename, data=txt_data, PWD=file)
                        images.append(
                            {
                                "img_file": name,
                                "label_file": txt_filename,
                                "tag": tags,
                                "title": title,
                                "description": str(description)
                            })
                        # datas['status'] = 'success'

            except  Exception as e:
                print('error2',e)

            # Assemble the Mongo record for this post
            save_data = {}
            save_data['_id'] = post_id
            save_data['title'] = title
            save_data['owner_id'] = seed['owner_id']
            save_data['name'] = seed['name']
            save_data['wall_start_from'] = seed['wall_start_from']
            save_data['authorName'] = PostHeaderTitle__authorName
            save_data['zip_href'] = zip_href
            # save_data['description'] = description
            save_data['tags'] = tags
            save_data['url'] = url
            save_data['image_hrefs'] = image_hrefs
            success_count = len(images)
            save_data['success_count'] = success_count

            # Write summary.json when at least one image was saved
            if success_count > 0:
                save_data['status'] = 'success'
                jsondata = {"page_url": url, "images": images}
                save_summary(path=file, file='summary.json', jsondata=jsondata)
            else:
                save_data['status'] = 'unsuccess'
            # Persist the record: insert first, fall back to update on duplicate _id
            try:
                MongoDBUtil.insert_one('vk', save_data)
            except Exception as e:
                print('error3',e)
                vk_data.updateOne({'_id': save_data['_id']}, save_data)
                vk_data.close()
        except Exception as e:
            print('error1',e)
            pass


def get_proxy():
    """Return a fixed http/https proxy mapping.

    NOTE: this shadows the earlier random-port ``get_proxy`` defined near the
    top of the file; neither version is currently called.
    """
    endpoint = "http://118.190.149.190:9999"
    return {"http": endpoint, "https": endpoint}
def get_posts_id():
    """Walk each known community's wall via get_page() and store every
    discovered post id as a seed in the ``vk_post_id`` collection.

    Pages hold 10 posts; pagination stops when a page returns fewer than 10.
    """
    # Pre-collected auto-design communities: owner_id -> short name.
    # Fix: the original dict had the last two entries reversed (name -> id),
    # so iteration produced a str own_id and int own_name and
    # 'https://vk.com/' + own_name raised TypeError.
    own_info = {
        -42410379: 'auto_dsgn',
        -184151678: 'autonew_vk',
        -270640827: 'jdm_korch18',
        -212701142: 'arbuzcm',
    }
    for own_id, own_name in own_info.items():
        wall_start_from = 0
        print('begin:', own_id, 'https://vk.com/' + own_name)
        path = own_name + '/'
        os.makedirs(path, exist_ok=True)  # one output folder per community
        page = 0
        while True:
            page += 1
            res = get_page(owner_id=str(own_id), name=own_name, wall_start_from=wall_start_from)
            # print(res['payload'][1][0])
            html_data = res['payload'][1][0]
            soups = BeautifulSoup(html_data, 'lxml')
            datas = soups.find_all('div', class_='_post_content')
            print('name:{},第{}页，有{}条数据'.format(own_name, page, len(datas)))
            for data in datas:
                try:
                    post_id = data.find('h5', class_='PostHeaderTitle__author').find('a')['data-post-id']

                    # Record the post id as a crawl seed
                    seed = {"_id": post_id, "owner_id": own_id, "owner_name": own_name, "wall_start_from": wall_start_from}
                    try:
                        MongoDBUtil.insert_one('vk_post_id', seed)
                    except Exception as e:
                        # Duplicate _id inserts raise — log and continue.
                        print(e)

                except Exception:
                    # Pinned/ad posts may lack the author header — skip them.
                    pass
            wall_start_from += 10
            if len(datas) < 10:
                break

def main():
    """Re-crawl every post whose Mongo status is 'unsuccess' with a pool of
    30 worker processes."""
    vk = mongo_manager("vk", db="car_images")
    seeds = vk.findAll({"status":'unsuccess'})
    lists = []
    for seed in seeds:
        lists.append(seed)
    vk.close()
    print('vk len:', len(lists))
    pool = Pool(processes=30)
    # NOTE(review): only the first seed is processed (lists[:1]) — looks like
    # leftover debugging; pass the full `lists` to crawl everything.
    pool.map(get_data, lists[:1])
    pool.close()  # close the pool: no new tasks accepted
    pool.join()
    # # test
    # get_data(lists[0])

def add_data():
    """Reset the crawl status of every document in the ``vk`` collection to
    None so the whole dataset can be re-crawled."""
    collection = mongo_manager("vk", db="car_images")
    reset_docs = []
    for doc in collection.findAll():
        doc['status'] = None
        reset_docs.append(doc)
        collection.updateOne({'_id': doc['_id']}, doc)
    collection.close()
    print(len(reset_docs))

def test_get_page():
    """Smoke-test get_page(): fetch the default wall page, report the post
    count, and print the first post block."""
    res = get_page()
    # print(res['payload'][1][0])
    html_fragment = res['payload'][1][0]
    soup = BeautifulSoup(html_fragment, 'lxml')
    posts = soup.find_all('div', class_='_post_content')
    print(len(posts))
    if posts:
        print(posts[0])

if __name__ == '__main__':
    # # Step 1: collect all post ids
    # get_posts_id()
    # # Step 2: restore/reset the Mongo data
    # add_data()
    # main()
    test_get_data()  # currently only runs the single-post debug helper

    # # test
    # response = get_photo_page()
    # print(response)
    # datas = response['payload'][1][3]
    # print(type(datas))
    # for data in datas:
    #     print(data['w_'][0])


    pass
