import json
from selenium import webdriver
from scrapy.http import HtmlResponse
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Module-level accumulator: one dict per scraped comment; serialized to
# JSON at the end of spider().
results = []

def parse(response):
    """Extract commenter name and comment text from every comment item.

    Appends one ``{"name": ..., "content": ...}`` dict per comment to the
    module-level ``results`` list.  ``response`` is a parsed HTML response
    exposing ``.css()``/``.xpath()`` selectors (scrapy ``HtmlResponse``).
    """
    for comment in response.css('div.comment-list-item'):
        # Bug fix: the original XPaths began with '//*[@id="comments"]...',
        # which searches the WHOLE document, so every iteration extracted
        # the same first match.  './/' searches relative to this comment.
        # NOTE(review): the tail segments (div[2]/div[1]/append) are kept
        # from the original absolute path -- verify against live markup.
        result = {
            "name": comment.xpath(
                './/div[2]/div[1]/append/text()').extract_first(),
            "content": comment.xpath(
                './/div[2]/div[2]/p/text()').extract_first(),
        }
        results.append(result)


def has_next_page(response):
    """Return True when the pagination has a further page to visit.

    The site adds the ``disabled`` class to the "next page" button on the
    last page, so the absence of ``li.disabled.next-page`` means another
    page exists.  Returns an explicit bool instead of the original
    ``1``/implicit ``None``; callers only test truthiness, so this is
    backward-compatible.
    """
    disabled = response.css('li.disabled.next-page').extract_first()
    # extract_first() yields None when the selector matches nothing.
    return disabled is None

def goto_next_page(driver):
    """Click the "next page" pagination link inside the comment box.

    Raises selenium's NoSuchElementException when the link is absent, so
    call this only after has_next_page() reports another page.
    """
    # find_element_by_css_selector() was removed in Selenium 4; the
    # By-based form below works on both Selenium 3 and 4.
    driver.find_element(By.CSS_SELECTOR, 'div.comment-box li.next-page').click()
def wait_page_return(driver, page):
    """Block (up to 50 s) until the active pagination item shows *page*.

    Used after a page-switch click to wait for the async re-render to
    finish before scraping the new page's DOM.
    """
    active_item = (By.XPATH, '//ul[@class="pagination"]/li[@class="active"]')
    expected_label = str(page)
    waiter = WebDriverWait(driver, 50)
    waiter.until(EC.text_to_be_present_in_element(active_item, expected_label))

def spider():
    """Crawl every comment page of the course and dump results to JSON.

    Drives a headless PhantomJS browser through all pagination pages,
    scraping username/content pairs into the module-level ``results``
    list, then writes the list to /home/shiyanlou/comments.json.
    """
    driver = webdriver.PhantomJS()
    url = 'https://www.shiyanlou.com/courses/427'
    try:
        driver.get(url)
        page = 1
        while True:
            # Wait until the pagination widget shows the page we expect,
            # i.e. the async page switch has finished rendering.
            wait_page_return(driver, page)
            response = HtmlResponse(url=url,
                                    body=driver.page_source.encode('utf8'))
            for item in response.css('div.comment-item-wrapper'):
                # extract_first() returns None when the selector matches
                # nothing; guard before .strip() to avoid AttributeError.
                username = item.css('a.username::text').extract_first()
                results.append({
                    'username': username.strip() if username else username,
                    'content': item.css('p::text').extract_first(),
                })
            if not has_next_page(response):
                break
            page += 1
            print('----------', page)
            goto_next_page(driver)
    finally:
        # Always shut the browser process down, even on a mid-crawl error;
        # the original leaked the PhantomJS process on any exception.
        driver.quit()
    with open('/home/shiyanlou/comments.json', 'w') as f:
        json.dump(results, f)



if __name__ == '__main__':
    spider()
