import selenium
from selenium import webdriver
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup as bf
from paras import *

# Config: platform-specific root directory where crawled items are stored
# (GET_PLATFORM comes from the star-import of the project `paras` module).
store_path = GET_PLATFORM()



def parse_action(action):
    """Parse a comment's action line into a small dict.

    The action text is whitespace-split; token 1 names the device
    ("Android"/"iPhone", anything else is treated as "PC"), and the
    3rd character of the 4th- and 3rd-from-last tokens carry the
    up-vote ("ding") and down-vote ("cai") counts respectively.

    Raises IndexError when the text has fewer tokens than expected
    (same as the original behaviour).
    """
    tokens = action.split()
    # The original if/else branches set identical ding/cai values and
    # only differed in the device name, so they are collapsed here.
    device = tokens[1] if tokens[1] in ("Android", "iPhone") else "PC"
    return {
        "device": device,
        "ding": tokens[-4][2],
        "cai": tokens[-3][2],
    }

def click_webelement(button):
    """Click a WebElement, or each element of a list of WebElements.

    A failed click (overlay, stale element, ...) is logged and skipped
    so one bad button does not abort the crawl.  Any other argument
    type is silently ignored, matching the original behaviour.
    """
    if isinstance(button, list):
        for sub_button in button:
            SLEEP()  # throttle between clicks
            try:
                sub_button.click()
            except Exception:  # was a bare except: don't swallow KeyboardInterrupt
                print("unable to click the button!")
    elif isinstance(button, selenium.webdriver.remote.webelement.WebElement):
        try:
            button.click()
        except Exception:
            print("unable to click the button")

def get_pages(browser):
    """Return the number of comment pages for the currently loaded item.

    Reads the total comment count from "#panelTitle > em" and divides
    by the site's fixed page size of 30, rounding up.  Always reports
    at least one page so the first (already loaded) page is parsed
    even when there are zero comments.
    """
    comment_sum = int(browser.find_element(By.CSS_SELECTOR, "#panelTitle > em").text)
    per_page_comments = 30
    # Ceiling division; the original `// 30 + 1` over-counted one whole
    # page whenever the total was an exact multiple of 30.
    return max(1, -(-comment_sum // per_page_comments))


def get_sigle_page_comments_dict(comments_dic, doc_model, commenter_url_list):
    """Collect every comment on the current page.

    Mutates and returns both accumulators: comments_dic maps the floor
    label to a per-comment dict, commenter_url_list gathers the
    commenters' profile URLs (duplicates are stripped by the caller).
    """
    for node in doc_model.select("#commentTabBlockNew > ul.comment_listBox > li"):
        # Commenter profile link; the id is its second-to-last path segment.
        profile_href = node.select("div.comment_avatar > div > a")[0]["href"]
        commenter_id = profile_href.split("/")[-2]

        floor_label = node.select("div.comment_avatar > span")[0].text
        comments_dic[floor_label] = {
            "user_id": commenter_id,
            "comment_id": node["id"].split("_")[2],
            "detail": node.select("div.comment_conBox > div.comment_conWrap > div.comment_con > p > span")[0].text,
            "published_date": node.select("div.comment_conBox > div.comment_avatar_time > div.time > meta")[0]["content"],
            "published_time": node.select("div.comment_conBox > div.comment_avatar_time > div.time")[0].text,
            "action": parse_action(node.select("div.comment_conBox > div.comment_conWrap > div.comment_action")[0].text),
        }
        commenter_url_list.append(profile_href)

    return comments_dic, commenter_url_list

def get_sigle_page_comments_structure(comments_structure, browser):
    """Record the quoted-comment (reply) structure of the current page.

    First expands the "show more" pads so quoted blocks are present in
    the DOM, then maps each floor label to {quoted_floor: blockquote_cid}.
    Mutates and returns comments_structure.
    """
    pad_buttons = browser.find_elements(By.CSS_SELECTOR, "div[id*=new_show_comment_more_div] > a")
    click_webelement(pad_buttons)
    doc_model = bf(browser.page_source, 'html.parser')

    for block in doc_model.select("#commentTabBlockNew > ul.comment_listBox > li.comment_list"):
        floor_label = block.span.text
        quoted = {
            int(quote.select(".comment_floor")[0].text): quote["blockquote_cid"]
            for quote in block.select("div.comment_conBox > div.blockquote_wrap > blockquote")
        }
        # Only record floors that actually quote other comments.
        if quoted:
            comments_structure[floor_label] = quoted

    return comments_structure

def turn_page_item(browser, page_num):
    """Jump the comment pagination widget to page_num.

    Types the page number into the "jump to page" input and clicks the
    trailing "Go" link.
    """
    SLEEP()  # give the page a moment before interacting
    page_input = browser.find_element(
        By.CSS_SELECTOR, "#commentTabBlockNew > ul.pagination > li.jumpToPage > input")
    page_input.click()
    page_input.clear()
    page_input.send_keys(page_num)
    go_link = browser.find_element(
        By.CSS_SELECTOR, "#commentTabBlockNew > ul.pagination > li:last-child > a")
    go_link.click()

def get_comments(browser):
    """Walk every comment page of the current item and collect comments.

    Returns (comments, commenter_url_list) where comments bundles the
    per-floor detail dict and the (currently disabled) reply-structure
    dict, and commenter_url_list holds de-duplicated profile URLs.
    """
    pages = get_pages(browser)
    comments_dic = {}
    comments_structure = {}
    commenter_url_list = []
    for page_index in range(1, pages + 1):
        SLEEP()
        # Page 1 is already loaded; later pages need explicit navigation.
        # (The original if/else branches were otherwise identical.)
        if page_index > 1:
            turn_page_item(browser, page_index)
        doc_model = bf(browser.page_source, 'html.parser')
        comments_dic, commenter_url_list = get_sigle_page_comments_dict(
            comments_dic, doc_model, commenter_url_list)

        # Reply-structure collection is currently disabled:
        # comments_structure = get_sigle_page_comments_structure(comments_structure, browser)

    comments = {
        "comments_detail": comments_dic,
        "comments_structure": comments_structure,
    }
    return comments, list(set(commenter_url_list))  # strip identical urls

def get_article_labels_dict(doc_model):
    """Extract the article's label/meta entries into a dict.

    Each entry renders as "key：value" (fullwidth colon); keys are
    suffixed with a running index ("标签_0", "标签_1", ...) so repeated
    "标签" entries do not overwrite each other.  The index only
    advances on "标签" entries, matching the original behaviour
    (other keys reuse the current index).
    """
    article_labels = doc_model.select("#feed-main > div.item-name > div.artic-label-box > div.experience-meta-nowrap > ul > li > div > div")
    article_labels_dict = {}
    label_index = 0
    for label in article_labels:
        # First whitespace token, then split key from value on the fullwidth colon.
        parts = label.text.split()[0].split("：")
        key, value = parts[0], parts[1]
        # The original if/else assigned identically in both branches and
        # only differed in whether the counter advanced.
        article_labels_dict[key + "_" + str(label_index)] = value
        if key == '标签':
            label_index += 1
    return article_labels_dict

def try_select_introduction(doc_model, css_descrition):
    """Best-effort text lookup.

    Return the text of the first element matching css_descrition, or
    "" when nothing matches.  The selector targets an optional page
    section, so a miss is normal and must not abort the crawl.
    """
    try:
        return doc_model.select(css_descrition)[0].text
    except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
        return ""

def get_item_info(url, browser):
    """Scrape the item page currently loaded in *browser*.

    url is the item's URL (stored in the output, not fetched here);
    browser is a selenium WebDriver already showing that page.
    Returns (item_info, graph_structure_item): the full metadata dict
    and a compact record linking creator, item and commenter URLs.
    """
    page_doc = bf(browser.page_source, 'html.parser')

    # Static fields parsed straight out of the page source.
    title = page_doc.select("#feed-main > div.info.J_info > div > div.title-box > h1")[0].text
    author_url = page_doc.select("#feed-main > div.info.J_info > div > div.info-details > div.author-info.J_author_info > a")[0]["href"]
    worthy_num = page_doc.select("#rating_worthy_num")[0].text
    unworthy_num = page_doc.select("#rating_unworthy_num")[0].text
    release_time = page_doc.select("#feed-main > div.info.J_info > div > div.info-details > div.author-info.J_author_info > span")[0].text
    article_labels_dict = get_article_labels_dict(page_doc)
    introduction = try_select_introduction(page_doc, "#feed-main > div.item-name > div.baike > div.introduce > div > div > p")
    item_image_url = page_doc.select("#feed-main > div.info.J_info > a > img")[0]["src"]
    price_info = page_doc.select("#feed-main > div.info.J_info > div > div.title-box > div > span > div")[0].prettify()

    # Breadcrumb categories flattened as "cat1_cat2_..._".
    class_hierarchy = "".join(
        crumb.text + '_'
        for crumb in page_doc.select("#feed-wrap > div > a> span"))

    # Paging through the comments mutates the browser state, so it
    # happens after the static page source has been parsed.
    comments, commenter_url_list = get_comments(browser)
    crawl_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

    item_info = {
        "title": title,
        "release_time": release_time,
        "crawl_time": crawl_time,
        "item_url": url,
        "item_image_url": item_image_url,
        "author_url": author_url,
        "worthy": {"worthy_num": worthy_num, "unworthy_num": unworthy_num},
        "article_labels": article_labels_dict,
        "introduction": introduction,
        "price_info": price_info,
        "class_hierarchy": class_hierarchy,
        "comments": comments,
    }

    graph_structure_item = {
        "creator_url": author_url,
        "item_url": url,
        "commenters_url": commenter_url_list,
    }

    return item_info, graph_structure_item


def crawl_item(url):
    """Crawl a single item page and persist it as JSON files.

    Returns (True, [author_url, commenter_url_list]) on success or
    (False, exception) on any failure; the browser, if it was opened,
    is closed either way.
    """
    browser = None  # so the except-handler can tell whether a browser exists
    try:
        browser = get_browser(url)
        item_info, graph_structure_item = get_item_info(url, browser)
        author_url = graph_structure_item["creator_url"]
        commenter_url_list = graph_structure_item["commenters_url"]

        # e.g. https://www.smzdm.com/p/47801316/ -> "47801316"
        item_id = url.split("/")[4]
        store_dir = os.path.join(store_path, "item_{}".format(item_id))
        try_mkdir(store_dir)
        write_json("item_{}.json".format(item_id), item_info, store_dir)
        write_json("item_{}_graph_structure.json".format(item_id), graph_structure_item, store_dir)

        try_close_browser(browser)
        return True, [author_url, commenter_url_list]

    except Exception as log:
        # The original unconditionally called try_close_browser(browser)
        # here, which raised NameError (masking the real error) whenever
        # get_browser() itself failed.
        if browser is not None:
            try_close_browser(browser)
        return False, log

if __name__ == '__main__':
    # Smoke test: crawl one known item page and report success/failure.
    url = 'https://www.smzdm.com/p/47801316/'
    # url = 'https://www.smzdm.com/p/49767111/'
    state, _result = crawl_item(url)
    print(state)
