# -*- coding: utf-8 -*-
"""
Created on Thu Aug 25 14:03:39 2022

@author: xiong
"""

import logging
import time
from urllib import parse
import re
import json
from bs4 import BeautifulSoup
from requests_html import HTMLSession
import hashlib
logger = logging.getLogger("facebook_spider")

from SqlExecuter import SqlExecuter
from  MqPusher import MqPusher
from YmlReader import YmlReader
from OutputTemplate import mqPostsTemplate
from OutputTemplate import mqSharesTemplate
from OutputTemplate import mqLikesTemplate
from OutputTemplate import mqReplysTemplate


class FacebookScraper:
    """Scraper for the mobile Facebook site (m.facebook.com).

    Collects a target page's brief info, posts, images/videos, comments,
    shares and likes; pushes media download jobs and datacenter records to
    RabbitMQ (via MqPusher) and persists rows through SqlExecuter.
    """

    # Root of the mobile site; relative hrefs found in pages are joined onto it.
    base_url = 'https://m.facebook.com'
    # Default HTTP headers for every request; replace via set_headers().
    default_headers = {
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'sec-fetch-dest': 'document',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'none',
        "Sec-Fetch-User": "?1", # request triggered by a user action: ?1 == true, ?0 == false
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36',
    }
    # Default logged-in session cookies; replace via set_cookies().
    # NOTE(review): these look like live credentials (c_user/xs/fr) committed
    # to source control -- they should come from configuration instead.
    default_cookies = "sb=igmXYWF-QRw-zRb-23p92dD9; datr=igmXYV00y0og6Pxt8zggCvLP; wd=1600x732; c_user=100082336090638; presence=C%7B%22t3%22%3A%5B%5D%2C%22utc3%22%3A1655969594673%2C%22v%22%3A1%7D; xs=25%3A_JFPY9ZoxpKfBg%3A2%3A1655884815%3A-1%3A-1%3A%3AAcUQ1BFP5SA8STHfQz4QRGe3Ff8LRkMnIMwhetK98w; fr=0qe7fXgQ81SduVTLd.AWUq0Utf24LnJ5_0b05m_0PrAA4.BitBc-.aq.AAA.0.0.BitBc-.AWWo1DnMpqw"
    
    def __init__(self, target_id="", session=None, taskid="noid"):
        """Set up MQ pushers, DB access and scraping state for one task.

        Args:
            target_id: Facebook page/profile identifier to scrape.
            session: optional requests_html.HTMLSession; a fresh one is
                created when omitted.
            taskid: external task identifier used for DB/MQ bookkeeping.
        """
        conf_reader = YmlReader()
        conf = conf_reader.settings
        mq = conf["rabbitmq"]
        username = mq["username"]
        password = mq["password"]
        host = mq["host"]
        port = mq["port"]
        virtual_host = mq["virtual-host"]
        video_queue = mq["video-queue"]
        pic_queue = mq["pic-queue"]
        datacenter_queue = mq["datacenter-queue"]
        self.MqPusher_video = MqPusher(username, password, host, port, virtual_host, video_queue)
        self.MqPusher_pic = MqPusher(username, password, host, port, virtual_host, pic_queue)
        self.MqPusher_datacenter = MqPusher(username, password, host, port, virtual_host, datacenter_queue)
        if session is None:
            session = HTMLSession()
        self.taskid = taskid
        self.target_id = target_id
        self.SqlExecuter = SqlExecuter(taskid, conf_reader)
        # NOTE(review): queue name comes from local config, but this is still
        # string-built SQL -- prefer a parameterized query if the value could
        # ever be attacker controlled.
        sql = """SELECT * FROM cfg_push where vc_topic = '%s' and vc_connect_status = 1 """ % (datacenter_queue)
        # BUG FIX: the original called execute_sql on the SqlExecuter CLASS,
        # passing the sql string as `self`; call it on the instance instead.
        rcv = self.SqlExecuter.execute_sql(sql, return_value=True)
        if not rcv:
            logger.info("datacenter queue is not valid")
            self.MqPusher_datacenter.valid = False
        self.session = session
        self.posts_url = []
        # reaction-icon URL -> human readable label, filled by get_like_people()
        self.mannerdict = {"": ""}
        self.user_id = "nouserid"
        # every request method reads self.default_proxies; initialise it so the
        # scraper also works when set_proxies() is never called
        self.default_proxies = {}
    def set_headers(self, headers):
        """Replace the default request headers.

        Args:
            headers: dict of HTTP header name -> value; any other type is
                rejected and the current headers are kept.
        """
        if not isinstance(headers, dict):
            # report through the module logger like the rest of the class
            # instead of a bare print()
            logger.error("Error : headers should be a dictionary")
            return
        logger.info("set headers success")
        self.default_headers = headers
    
    def set_cookies(self, cookies):
        """Replace the default request cookies.

        Accepts either a ready-made dict or a raw ``Cookie`` header string
        ("k1=v1; k2=v2; ..."), which is parsed into a dict.

        Args:
            cookies: dict or cookie-header string; other types are rejected
                and the current cookies are kept.
        """
        if not isinstance(cookies, dict) and not isinstance(cookies, str):
            print("Error : cookies should be a dictionary or a string")
            return
        if isinstance(cookies, str) and cookies:
            pairs = cookies.split(";")
            cookies = {}
            for pair in pairs:
                # BUG FIX: split only on the FIRST '=' so values that contain
                # '=' themselves (base64 blobs, signed tokens) survive intact;
                # partition also tolerates a fragment without any '='.
                name, _, value = pair.partition("=")
                cookies[name.strip()] = value.strip()

        self.default_cookies = cookies

        logger.info("set cookies success")
        
    def set_proxies(self, proxies):
        """Replace the proxy mapping used for outgoing requests."""
        if isinstance(proxies, dict):
            logger.info("set proxies success")
            self.default_proxies = proxies
        else:
            print("Error : proxies should be a dictionary")
    def set_target_id(self, target_id):
        """Point the scraper at a new target account/page id."""
        logger.info(f"set target_id success:{target_id}")
        self.target_id = target_id
        
        
    def extract_postdata(self, text):
        """Pull the hidden form tokens and next-page link out of a first-page
        HTML response.

        Returns:
            ``(post_data, next_page_url)``, or ``(None, None)`` when any of
            the required tokens (dtsg, lsd, url, ajax id) is missing.
        """
        def _token(pattern, prefix):
            # text between `prefix` and the closing quote, or "" if absent
            hit = re.search(pattern, text, re.DOTALL)
            return hit.group(0).replace(prefix, "")[:-1] if hit else ""

        dtsg = _token('"dtsg":{"token":".+?"', '"dtsg":{"token":"')
        jazoest = _token('"jazoest", ".+?"', '"jazoest", "')
        lsd = _token('"LSD",\[\],{"token":".+?"', '"LSD",[],{"token":"')
        ajaxid = _token('"encrypted":".+?"', '"encrypted":"')

        hrefs = re.findall(r'"InitMMoreItemAutomatic".*?,href:"(.*?)"', text, re.DOTALL)
        post_url = hrefs[0] if hrefs else ""

        if not dtsg or not lsd or not post_url or not ajaxid:
            return None, None
        post_data = {
            "fb_dtsg": dtsg,
            "jazoest": jazoest,
            "lsd": lsd,
            "_a": ajaxid,
        }
        return post_data, self.base_url + post_url
    
    def next_page(self, text, jazoest):
        """Extract pagination tokens from an AJAX "more items" payload.

        Args:
            text: javascript payload of the previous paging response.
            jazoest: jazoest token carried over from the first page (the
                AJAX payload does not repeat it).

        Returns:
            ``(post_data, next_page_url)`` on success, or
            ``(None, post_url)`` when any required token is missing
            (``post_url`` may itself be "").
        """
        dtsg, lsd, post_url, ajaxid = "", "", "", ""
        hit = re.search('"dtsg":{"token":".+?"', text, re.DOTALL)
        if hit:
            dtsg = hit.group(0).replace('"dtsg":{"token":"', "")[:-1]

        hit = re.search('"LSD",\[\],{"token":".+?"', text, re.DOTALL)
        if hit:
            lsd = hit.group(0).replace('"LSD",[],{"token":"', "")[:-1]

        hits = re.findall(r'"InitMMoreItemAutomatic".*?,"href":"(.*?)"', text, re.DOTALL)
        if hits:
            # hrefs arrive JS/JSON escaped (\uXXXX and "\/"); unescape them
            post_url = hits[0].encode("utf8").decode("unicode_escape").replace("\/", "/")

        hit = re.search('"encrypted":".+?"', text, re.DOTALL)
        if hit:
            ajaxid = hit.group(0).replace('"encrypted":"', "")[:-1]

        if not dtsg or not lsd or not post_url or not ajaxid:
            # keep returning the (possibly empty) url so callers can still
            # advance even when the tokens could not be refreshed
            return None, post_url
        # NOTE: the original repeated this guard *after* building post_data;
        # that second check was unreachable and has been removed.
        post_data = {
            "fb_dtsg": dtsg,
            "jazoest": jazoest,
            "lsd": lsd,
            "_a": ajaxid,
        }
        return post_data, self.base_url + post_url
    
    
    def get_public_brief_info(self, url):
        """Scrape the "about" style summary of a public page.

        Args:
            url: absolute URL of the page whose brief info is wanted.

        Returns:
            dict with the vc_* fields expected by the datacenter schema;
            fields that cannot be found stay "".
        """
        # default_cookies may still be the raw header string; normalise once
        if isinstance(self.default_cookies, str):
            parts = self.default_cookies.split(";")
            self.default_cookies = {}
            for part in parts:
                self.default_cookies[part.split("=")[0].strip()] = part.split("=")[1].strip()

        r = self.session.get(url, cookies=self.default_cookies, headers=self.default_headers,
                             proxies=self.default_proxies, timeout=5)

        infodic = {'vc_id': '',
               'vc_business_hours': '',
               'vc_business_info': '',
               'vc_business_story': '',
               'vc_contact_info': '',
               'dt_gather_time': '',
               'vc_home_page_info': '',
               'vc_interests_info': '',
               'vc_md5': '',
               'vc_more_info': '',
               'vc_normal_user_info': '',
               'vc_other_info': '',
               'vc_task_id': '',
               'vc_team_members': '',
               'vc_userid': ''}

        # struct_time is always truthy, so assign directly (the original
        # guarded this pointlessly)
        infodic['dt_gather_time'] = time.localtime(time.time())

        vc_business_story = r.html.find('meta[name = "description"]')
        if vc_business_story:
            infodic["vc_business_story"] = vc_business_story[0].attrs["content"]

        soup = BeautifulSoup(r.text)

        # icon URL -> info field: each "about" row is tagged with a small
        # icon image; the text we want is the icon's next sibling
        bsdic = {
            "website": 'https://static.xx.fbcdn.net/rsrc.php/v3/y5/r/ZWx4MakmUd4.png',
            'vc_business_info': 'https://static.xx.fbcdn.net/rsrc.php/v3/yV/r/_6QbEglrVsx.png',
            "vc_contact_info": 'https://static.xx.fbcdn.net/rsrc.php/v3/yn/r/IIz7DmH3RfV.png',
            "vc_other_info": 'https://static.xx.fbcdn.net/rsrc.php/v3/yG/r/yzxwaDMdZAx.png',
            "address": "https://static.xx.fbcdn.net/rsrc.php/v3/y8/r/PwUkFLBBA85.png",

        }
        for key, icon_url in bsdic.items():
            # hoisted: the original ran each CSS select twice
            hits = soup.select(f'img[src = "{icon_url}"]')
            if not hits:
                continue
            row_text = hits[0].nextSibling.text
            if key in infodic:
                infodic[key] = row_text
            else:
                # rows with no dedicated column are folded into vc_more_info
                infodic['vc_more_info'] += key + ":" + str(row_text) + ",\n"
        return infodic

    def get_posts_url(self, url, isFirstpage=1, post_data=None):
        """Fetch one timeline page and collect the post ids found on it.

        Args:
            url: timeline URL (first page) or AJAX paging URL (later pages).
            isFirstpage: truthy for the initial HTML page, falsy for the
                "for (;;);"-prefixed AJAX payloads of later pages.
            post_data: tokens returned by the previous call; its "jazoest"
                is required when isFirstpage is falsy.

        Returns:
            ``(post_data, next_page_url, soup)``; on a malformed AJAX
            payload returns ``("", "", raw_text)`` instead.

        Side effects: extends ``self.posts_url`` and may update
        ``self.user_id``.
        """
        # BUG FIX: `post_data={}` was a shared mutable default argument
        if post_data is None:
            post_data = {}
        # normalise a raw cookie-header string into a dict once
        if isinstance(self.default_cookies, str):
            parts = self.default_cookies.split(";")
            self.default_cookies = {}
            for part in parts:
                self.default_cookies[part.split("=")[0].strip()] = part.split("=")[1].strip()

        if isFirstpage:
            r = self.session.get(url, cookies=self.default_cookies, headers=self.default_headers,
                                 proxies=self.default_proxies)
            soup = BeautifulSoup(r.text)
        else:
            r = self.session.get(url, cookies=self.default_cookies, headers=self.default_headers,
                                 proxies=self.default_proxies)
            try:
                # later pages are JSON wrapped in an anti-hijacking prefix
                soup = r.text.replace("for (;;);", "")
                jsontext = json.loads(soup)
                soup = BeautifulSoup(jsontext["payload"]["actions"][0]["html"])
                code = jsontext["payload"]["actions"][2]["code"]
            except Exception:  # narrowed from a bare except
                # payload didn't parse: hand the raw text back for debugging
                return "", "", soup

        self.posts_url += list(set(re.findall(r"story_fbid=(.+?)&", str(soup))))
        ids = list(set(re.findall(r"story_fbid=.+?&amp;id=(.*?)&", str(soup))))
        if ids:
            self.user_id = ids[0]
        if isFirstpage:
            post_data, next_page_url = self.extract_postdata(r.text)
        else:
            temp_data, next_page_url = self.next_page(code, post_data["jazoest"])
            if temp_data:
                post_data = temp_data

        return post_data, next_page_url, soup
    
    def get_public_postsAndImageurlAndComments_from_child_url(self,childurl):
        """Scrape one post (story) page: text, time, media, comments and the
        share/like browse urls.

        Builds the m.facebook.com story url from ``childurl`` (a story_fbid)
        and ``self.user_id``, parses the post, pushes image/video download
        jobs onto the resource queues, stores the post and its replies via
        SqlExecuter and publishes them to the datacenter queue.

        Returns:
            {"status": "success", "comments_num", "post_share_url",
            "post_like_url"} on success, or {"status": "fail", "error", ...}
            (with the parsed soup attached when the parse phase failed).
        """
        return_info = {"posts_time":  "",
                       "posts_text":              "", #1
                       "image_url_and_description":  [],#2
                       "video_url":               [],#3
                       "comments":                [], #4
                       "post_share_url" : "",
                       "post_like_url" : "",
                       "post_url":childurl,
                       "post_id": re.findall(r"[^b]id=(.*?)&",childurl)[0] if re.findall(r"[^b]id=(.*?)&",childurl) else "" ,
                       "post_fbid":re.findall(r"story_fbid=(.*?)&",childurl)[0] if re.findall(r"story_fbid=(.*?)&",childurl) else "",
                       "dt_gather_time" : time.localtime(time.time()),
                       "nick_name":"",
                       "photo_url":"",
                       "comment_num":"",
                       "like_num":"",
                       "share_num":"",
                       "time":"",
                       "user_id":""
                       }

        get_url = self.base_url +"/story.php?story_fbid=" +childurl + "&id=%s&m_entstream_source=timeline" %self.user_id
        r = self.session.get(get_url,cookies =self.default_cookies,headers=self.default_headers,proxies = self.default_proxies,)
        child_soup= BeautifulSoup(r.text)
        # The markup below is fragile (Facebook changes it often): treat any
        # parse error as a soft failure reported to the caller.
        try:
            # post metadata lives inside "hidden_elem" divs
            grand_soup = BeautifulSoup(child_soup.find_all("div",attrs  = {"class" : "hidden_elem"})[0].string)
            # NOTE(review): attrs = "hidden_e" filters on CSS class and likely
            # never matches ("hidden_elem" is the real class); Tag.content is
            # also not a real attribute, so json.loads() below probably always
            # fails -- kept as-is pending confirmation. Debug prints removed.
            for a in child_soup.find_all("div",attrs = "hidden_e"):
                try:
                    jsontext = json.loads(a.content)
                    return_info["posts_time"] = jsontext["dateCreated"]
                    return_info["posts_text"] = jsontext["articleBody"]
                except Exception:  # narrowed from a bare except
                    pass
            # author avatar + nick name
            pic = grand_soup.find_all("i",attrs = {"aria-label": re.compile("profile")})
            if pic:
                elm = pic[0]
                return_info["nick_name"] = elm.attrs["aria-label"].split(",")[0]
                datr_search = re.search('\(.+?\)',elm["style"], re.DOTALL)
                return_info["photo_url"] = parse.unquote(datr_search.group(0)[2:-2].replace(" ","").replace("\\\\","\\").replace("\\","%"))

            # share/comment/reaction counters live in inline scripts
            for i in child_soup.find_all("script"):
                sign = re.findall("share_count:(\d*)",str(i.string))
                if sign:
                    return_info["share_num"] = re.findall("share_count:(\d*)",str(i.string))[0]
                    return_info["comment_num"] = re.findall("comment_count:(\d*)",str(i.string))[0]
                    return_info["like_num"] = re.findall("reactioncount:(\d*)",str(i.string))[0]
                    return_info["user_id"] = re.findall('actor_for_post:"(\d*)"',str(i.string))[0]

            grand_child_soup =BeautifulSoup(child_soup.find("div",attrs = {"class":"hidden_elem"}).string)
            #1 post body text and publish time

            for i in grand_child_soup.find_all("p"):
                return_info["posts_text"] += str(i.string)
            return_info["posts_time"] = re.findall(r'"publish_time":([0-9]*?),',str(grand_child_soup))[0] if re.findall(r'"publish_time":([0-9]*?),',str(grand_child_soup)) else ""
            #2 post images: background-image urls carried on <i> styles

            for i in grand_child_soup.find_all("i"):
                if "style" in i.attrs:
                    datr_search = re.search('\(.+?\)',i["style"], re.DOTALL)
                    if datr_search:
                        image_url = parse.unquote(datr_search.group(0)[2:-2].replace(" ","").replace("\\\\","\\").replace("\\","%"))
                        savepath = "facebook/" + self.user_id + "/" + hashlib.md5(image_url.encode(encoding='UTF-8')).hexdigest()  + ".jpg"
                        push_data = {"dateTime":time.time(),
                                 "destPath":savepath,
                                 "priority":0,
                                 "resourceType":"images",
                                 "srcPath":image_url}
                        self.MqPusher_pic.provide(push_data)
                        return_info["image_url_and_description"].append([image_url,savepath])

            #3 inline videos
            video_divs = child_soup.find("div",attrs = {"data-sigil":"inlineVideo"})
            if video_divs:
                for video_div in video_divs:
                    video_url = json.loads(video_div.attrs["data-store"])["src"].replace(r"\/","/")
                    savepath = "facebook/" + self.user_id +  "/" + hashlib.md5(video_url.encode(encoding='UTF-8')).hexdigest()  + ".mp4"

                    push_data = {"dateTime":time.time(),
                                 "destPath":savepath,
                                 "priority":0,
                                 "resourceType":"videos",
                                 "srcPath":video_url}

                    self.MqPusher_video.provide(push_data)
                    return_info["video_url"].append([video_url,savepath])

            #4 comments
            grand_child_soup2 =BeautifulSoup(child_soup.find_all("div",attrs = {"class":"hidden_elem"})[1].string)
            comments = grand_child_soup2.find_all("div",attrs = {"data-sigil":"comment"})

            #5 link to the "shared by" browse page
            post_share_url = grand_child_soup2.find_all("a",attrs = {"href":re.compile("/browse/shares")})[0]["href"]
            return_info["post_share_url"] = post_share_url

            #6 link to the reactions browse page
            post_like_url = grand_child_soup2.find_all("a",attrs = {"href":re.compile("/ufi/reaction/profile/browser")})[0]["href"]
            return_info["post_like_url"] = post_like_url

            def filter_comment_image(tag):
                # an <i> with style + aria-label that is NOT a profile picture
                # is an image attached to the comment itself
                if "style"in tag.attrs and "aria-label" in tag.attrs and tag.name == "i":
                    if "profile" not in tag["aria-label"]:
                        return True
                return False
            comments_list = []
            if comments:
                for comment in comments:
                    # BUG FIX: reset per comment -- the value previously
                    # leaked from the prior iteration (or was undefined on
                    # the first) when a comment had no <abbr> timestamp
                    comment_time = ""
                    comment_user_url = comment.find_all("a")[0]["href"]
                    comment_user_img = comment.find_all("a")[0].find('i',attrs={"aria-label":True,"style":True})
                    if comment_user_img:
                        comment_user_img = parse.unquote(re.search('\(.+?\)',comment_user_img["style"], re.DOTALL).group(0)[2:-2].replace(" ","").replace("\\\\","\\").replace("\\","%"))
                    comment_user_id = comment.find_all("a")[1].text
                    comment_body = comment.find('div',attrs={"data-sigil":"comment-body"})
                    if comment_body.find("a"):
                        quote_user_id = comment_body.find("a").text
                        quote_user_url = comment_body.find("a")["href"]
                    else:
                        quote_user_id = ""
                        quote_user_url = ""
                    if quote_user_id:
                        comment_text = comment_body.text.replace(quote_user_id,"@"+quote_user_id , 1)
                    else:
                        comment_text = comment_body.text
                    comment_image_element = comment.find(filter_comment_image)
                    comment_image_url = ""
                    comment_image_description = ""
                    if comment_image_element:
                        datr_search = re.search('\(.+?\)',comment_image_element["style"], re.DOTALL)
                        if datr_search:
                            comment_image_url = parse.unquote(datr_search.group(0)[2:-2].replace(" ","").replace("\\\\","\\").replace("\\","%"))
                        if "aria-label" in comment_image_element.attrs:
                            comment_image_description = comment_image_element.attrs["aria-label"]
                        else:
                            comment_image_description = ""

                    if comment.find_all("abbr") :
                        comment_time = comment.find_all("abbr")[0].text
                    comments_list.append({"comment_user_id":comment_user_id,
                                             "comment_user_img":comment_user_img,
                                             "comment_user_url":comment_user_url,
                                             "quote_user_id":quote_user_id, # the user this comment @-mentions
                                             "quote_user_url":quote_user_url,
                                             "comment_text":comment_text,
                                             "comment_image_description":comment_image_description,
                                             "comment_image_url":comment_image_url,
                                             "comment_time" :   comment_time ,
                                             "dt_gather_time" : time.time(),
                                             "post_id" : childurl
                        }

                        )
                # BUG FIX: key was misspelled "conmments"; the dict is
                # initialised (and consumed downstream) as "comments"
                return_info["comments"] = comments_list
        except Exception as e:
            return {"status" :"fail",
                    "error" : e,
                    "post_share_url" :"",
                    "post_like_url" : "",
                    "soup" : child_soup
                       }
        # persistence phase: store + publish; failures here are reported
        # separately from parse failures
        try:
            self.SqlExecuter.add_data(return_info,"posts")
            self.SqlExecuter.add_data(comments_list,"reply")

            self.MqPusher_datacenter.provide(mqPostsTemplate(return_info,self.taskid))
            for comment in comments_list:
                self.MqPusher_datacenter.provide(mqReplysTemplate(comment,self.taskid))

            return {"status" :"success",
                    "comments_num" : len(comments_list),
                    "post_share_url" : post_share_url,
                    "post_like_url" : post_like_url
                       }
        except Exception as e:
            return {"status" :"fail",
                    "error" : e,
                    "post_share_url" :"",
                    "post_like_url" : ""
                       }
        
    def get_share_people(self,post_share_url,childurl):
        """Scrape one page of a post's "shared by" list.

        Pushes each sharer's avatar to the picture queue, stores the share
        rows via SqlExecuter and publishes them to the datacenter queue.

        Args:
            post_share_url: relative /browse/shares url from the post page.
            childurl: id of the post the shares belong to.

        Returns:
            The relative url of the next shares page, or None when there is
            no further page / the request failed.  An empty input url keeps
            the historical ``([], None)`` return for backward compatibility.
        """
        if not post_share_url:return [],None
        try:
            r = self.session.get(self.base_url + post_share_url, cookies=self.default_cookies)
        except Exception:  # narrowed from a bare except
            logger.debug(f"Requesting page from:{post_share_url} is defeated")
            return None
        child_soup= BeautifulSoup(r.text)
        share_data_list = []
        for a in child_soup.find_all("div",attrs = {"data-sigil" : "undoable-action marea"}):
            # avatar url is embedded in the <i> element's background-image style
            img_data = a.find("i")["style"]

            user_profile_img = parse.unquote(re.search('\(.+?\)',img_data, re.DOTALL).group(0)[2:-2].replace(" ","").replace("\\\\","\\").replace("\\","%"))
            user_url = a.find("a")["href"]
            user_name = a.find("strong").text

            savepath = "facebook/" + self.user_id + "/" + hashlib.md5(user_profile_img.encode(encoding='UTF-8')).hexdigest()  + ".jpg"
            push_data = {"dateTime":time.time(),
                     "destPath":savepath,
                     "priority":0,
                     "resourceType":"images",
                     "srcPath":user_profile_img}
            share_data_list.append({"user_name" : user_name,
                                    "user_url"  : user_url,
                                    "user_profile_img" : user_profile_img,
                                    "dt_gather_time" :time.time(),
                                    "post_id": childurl,
                                    "url_local" :savepath})

            self.MqPusher_pic.provide(push_data)

        if child_soup.find_all("a",attrs = {"href":re.compile("/browse/shares")}):
            next_url = child_soup.find_all("a",attrs = {"href":re.compile("/browse/shares")})[0]["href"]
        else:
            next_url = None
        self.SqlExecuter.add_data(share_data_list,"share")
        for share in share_data_list:
            self.MqPusher_datacenter.provide(mqSharesTemplate(share,self.taskid))
        logger.info("got %.0f share data"%len(share_data_list))
        return next_url

    def get_more_share_people(self,post_share_url,childurl):
        """Scrape a follow-up page of a post's "shared by" list.

        Same as get_share_people() except that avatars are NOT re-pushed to
        the picture queue (they were queued on the first page).

        Args:
            post_share_url: relative /browse/shares url of the next page.
            childurl: id of the post the shares belong to.

        Returns:
            The relative url of the next shares page, None when there is no
            further page / the request failed, or ([], None) for an empty
            input url (kept for backward compatibility).
        """
        if not post_share_url: return [],None
        try:
            r = self.session.get(self.base_url + post_share_url, cookies=self.default_cookies)
        except Exception:  # narrowed from a bare except
            logger.debug(f"Requesting page from:{post_share_url} is defeated")
            return None
        child_soup= BeautifulSoup(r.text)
        share_data_list = []
        for a in child_soup.find_all("div",attrs = {"data-sigil" : "undoable-action marea"}):
            # avatar url is embedded in the <i> element's background-image style
            img_data = a.find("i")["style"]

            user_profile_img = parse.unquote(re.search('\(.+?\)',img_data, re.DOTALL).group(0)[2:-2].replace(" ","").replace("\\\\","\\").replace("\\","%"))
            user_url = a.find("a")["href"]
            user_name = a.find("strong").text
            savepath = "facebook/" + self.user_id + "/" + hashlib.md5(user_profile_img.encode(encoding='UTF-8')).hexdigest()  + ".jpg"
            share_data_list.append({"user_name" : user_name,
                                    "user_url"  : user_url,
                                    "user_profile_img" : user_profile_img,
                                    "dt_gather_time" :time.time(),
                                    "post_id": childurl,
                                    "url_local" :savepath})
        if child_soup.find_all("a",attrs = {"href":re.compile("/browse/shares")}):
            next_url = child_soup.find_all("a",attrs = {"href":re.compile("/browse/shares")})[0]["href"]
        else:
            next_url = None
        self.SqlExecuter.add_data(share_data_list,"share")
        for share in share_data_list:
            self.MqPusher_datacenter.provide(mqSharesTemplate(share,self.taskid))
        logger.info("got %.0f share data"%len(share_data_list))
        return next_url
    
    def get_like_people(self,post_like_url,childurl):
        """Scrape the first page of a post's reactions ("like") list.

        Builds the reaction-icon-url -> label map from the tab column, then
        collects one page of reacting users, stores them via SqlExecuter and
        publishes them to the datacenter queue.

        Args:
            post_like_url: relative /ufi/reaction/profile/browser url.
            childurl: id of the post the reactions belong to.

        Returns:
            The relative url of the next reactions page, or None when there
            is no further page / the request failed / the url was empty.
        """
        # guard added for parity with the share/like paging siblings
        if not post_like_url:
            return None
        try:
            r = self.session.get(self.base_url + post_like_url, cookies=self.default_cookies)
        except Exception:  # narrowed from a bare except
            logger.debug(f"Requesting page from:{post_like_url} is defeated")
            return None
        if r.text.startswith("for"):
            # AJAX payload: strip the "for (;;);" prefix and parse the JSON
            aa = json.loads(r.text[9:])
            child_soup= BeautifulSoup(aa["payload"]["actions"][0]["html"])
        else:
            child_soup= BeautifulSoup(r.text)
        # map each reaction icon url in the tab column to a readable label
        manners = ['赞', '大爱', '抱抱', '哇', '怒', '笑趴', '心碎']
        columns = child_soup.find_all("div",attrs = {"class" : "scrollAreaColumn"})
        if columns:  # BUG FIX: unconditional [0] raised IndexError when absent
            for index,manner in enumerate(columns[0].find_all("i")):
                if index >= len(manners):
                    break  # more icons than known labels; ignore the rest
                url = parse.unquote(re.search('\(.+?\)',manner["style"], re.DOTALL).group(0)[2:-2].replace(" ","").replace("\\\\","\\").replace("\\","%"))
                self.mannerdict[url] = manners[index]

        like_data_list = []
        for a in child_soup.find_all("div",attrs = {"data-sigil" : "undoable-action marea"}):
            img_data = a.find("i")["style"]
            user_profile_img = parse.unquote(re.search('\(.+?\)',img_data, re.DOTALL).group(0)[2:-2].replace(" ","").replace("\\\\","\\").replace("\\","%"))
            user_url = a.find("a")["href"]
            user_name = a.find("strong").text
            if a.next_sibling:
                manner = parse.unquote(re.search('\(.+?\)',a.next_sibling["style"], re.DOTALL).group(0)[2:-2].replace(" ","").replace("\\\\","\\").replace("\\","%"))
            else:
                manner = ""
            savepath = "facebook/" + self.user_id + "/" + hashlib.md5(user_profile_img.encode(encoding='UTF-8')).hexdigest()  + ".jpg"
            like_data_list.append({"user_name" : user_name,
                                    "user_url"  : user_url,
                                    "url_local" :savepath,
                                    "user_profile_img" : user_profile_img,
                                    # .get(): an icon url missing from mannerdict used to raise KeyError
                                    "manner" : self.mannerdict.get(manner, ""),
                                    "dt_gather_time" : time.time(),
                                    "post_id": childurl})
        if child_soup.find_all("a",attrs = {"href":re.compile("/ufi/reaction/profile/browser")}):
            next_url =  child_soup.find_all("a",attrs = {"href":re.compile("/ufi/reaction/profile/browser")})[0]["href"]
        else:
            next_url = None
        self.SqlExecuter.add_data(like_data_list,"like")
        for like in like_data_list:
            self.MqPusher_datacenter.provide(mqLikesTemplate(like,self.taskid))
        logger.info("got %.0f like data"%len(like_data_list))
        return next_url
    
    def get_more_like_people(self,post_like_url,childurl):
        """Scrape a follow-up page of a post's reactions list.

        Unlike get_like_people() this handles the "for (;;);"-prefixed AJAX
        payload of later pages and does not rebuild the reaction-icon map.

        Args:
            post_like_url: relative reactions-browse url of the next page.
            childurl: id of the post the reactions belong to.

        Returns:
            The relative url of the next page, None when there is no further
            page / the request failed, or ([], None) for an empty input url
            (kept for backward compatibility).
        """
        if not post_like_url: return [],None
        try:
            r = self.session.get(self.base_url + post_like_url, cookies=self.default_cookies)
        except Exception:  # narrowed from a bare except
            logger.debug(f"Requesting page from:{post_like_url} is defeated")
            return None
        if r.text.startswith("for"):
            # AJAX payload: strip the "for (;;);" prefix and parse the JSON
            aa = json.loads(r.text[9:])
            child_soup= BeautifulSoup(aa["payload"]["actions"][0]["html"])
            child_soup2= BeautifulSoup(aa["payload"]["actions"][1]["html"])
            if child_soup2.find_all("a",attrs = {"href":re.compile("/ufi/reaction/profile/browser")}):
                next_url =  child_soup2.find_all("a",attrs = {"href":re.compile("/ufi/reaction/profile/browser")})[0]["href"]
            else:
                next_url = None
        else:
            child_soup= BeautifulSoup(r.text)
            # BUG FIX: the last page has no "next" link; the unconditional [0]
            # raised IndexError here while the AJAX branch was guarded
            more_links = child_soup.find_all("a",attrs = {"href":re.compile("/ufi/reaction/profile/browser")})
            next_url = more_links[0]["href"] if more_links else None

        like_data_list = []
        for a in child_soup.find_all("div",attrs = {"data-sigil" : "undoable-action marea"}):
            img_data = a.find("i")["style"]
            user_profile_img = parse.unquote(re.search('\(.+?\)',img_data, re.DOTALL).group(0)[2:-2].replace(" ","").replace("\\\\","\\").replace("\\","%"))
            user_url = a.find("a")["href"]
            user_name = a.find("strong").text
            if a.next_sibling:
                manner = parse.unquote(re.search('\(.+?\)',a.next_sibling["style"], re.DOTALL).group(0)[2:-2].replace(" ","").replace("\\\\","\\").replace("\\","%"))
            else:
                manner = ""
            savepath ="facebook/" + self.user_id + "/" + hashlib.md5(user_profile_img.encode(encoding='UTF-8')).hexdigest()  + ".jpg"
            like_data_list.append({"user_name" : user_name,
                                    "user_url"  : user_url,
                                    "url_local" :savepath,
                                    "user_profile_img" : user_profile_img,
                                    # .get(): an icon url missing from mannerdict used to raise KeyError
                                    "manner" : self.mannerdict.get(manner, ""),
                                    "dt_gather_time" : time.time(),
                                    "post_id": childurl})

        self.SqlExecuter.add_data(like_data_list,"like")
        for like in like_data_list:
            self.MqPusher_datacenter.provide(mqLikesTemplate(like,self.taskid))
        logger.info("got %.0f like data"%len(like_data_list))
        return next_url