# # coding:utf-8
#
# import time
# import sys
# import crawler
# import docx
# from docx.shared import Inches
#
#
# # 易百网
# class YiBai(object):
#     def __init__(self):
#         # 需要进行配置
#         self.filename = 'Selenium.docx'
#         self.web_src = 'SeleniumImage'
#         self.init_url = 'https://www.yiibai.com/selenium/'
#
#         self.base_url = 'https://www.yiibai.com'
#         self.img_url = 'http://www.yiibai.com'
#
#     # 获取网页内容
#     def getContent(self, url):
#         content_list = []
#         temp_list = []
#
#         soup = crawler.get_soup_html(url)
#         title = soup.find(attrs={"class": "article-title"}).text.strip()
#         tag_list = soup.find(attrs={"class": "article-content"}).findAll({"h2", "p", "img", "div"})
#         tag_len = tag_list.__len__()
#
#         for tag in tag_list:
#             tag_name = tag.name
#             tag_content = tag.text.strip().replace("\n", "")
#             if tag.contents.__len__() >= tag_len:
#                 continue
#             if tag_content.__contains__("易百教程移动端"):
#                 break
#             if tag_name == "p" or tag_name == "div":
#                 if tag_content != "" and tag_content not in temp_list:
#                     temp_list.append(tag_content)
#                     content_list.append('p:' + str(tag.text.strip()))
#             elif tag_name == "h2":
#                 content_list.append('h:' + str(tag.text.strip()))
#             elif tag_name == "img" and tag['src'].__contains__('/uploads'):
#                 if tag['src'].__contains__(self.img_url):
#                     img_src = tag['src']
#                 else:
#                     img_src = self.base_url + tag['src']
#                 img_path = crawler.downloadImg(img_src, self.web_src)
#                 content_list.append('m:' + img_path)
#
#         return title, content_list
#
#     def saveDoc(self, title, content_list, doc):
#         doc.add_heading(unicode(title), 1)
#
#         for content in content_list:
#             if content[:2] == 'p:':
#                 doc.add_paragraph(unicode(content[2:]))
#             elif content[:2] == 'h:':
#                 doc.add_heading(unicode(content[2:]), 2)
#             elif content[:2] == 'm:':
#                 try:
#                     doc.add_picture(unicode(content[2:]), width=Inches(4.0))
#                 except:
#                     print('该图片下载不成功：' + content[2:])
#                     continue
#
#     def get_url(self):
#         url_list = []
#
#         soup = crawler.get_soup_html(self.init_url)
#         tag_list = soup.find(attrs={"class": "pagemenu"}).findAll({"a"})
#         for tag in tag_list:
#             try:
#                 if tag['href'].__contains__(self.base_url):
#                     href = tag['href']
#                 else:
#                     href = self.base_url + tag['href']
#                 url_list.append(href)
#             except:
#                 print(tag)
#
#         return url_list
#
#     # 运行
#     def run(self):
#         doc = docx.Document(option["template_path"] + "template.docx")
#         # 获取url
#         url_list = self.get_url()
#
#         # 获取网页内容
#         for url in url_list:
#             print(url)
#             title, content_list = self.getContent(url)
#             self.saveDoc(title, content_list, doc)
#             time.sleep(5)
#
#         doc.save(option["output_path"] + self.filename)
#
#
# if __name__ == "__main__":
#     yiBai = YiBai()
#     yiBai.run()
