# -*- encoding: utf-8 -*-
import re
import urllib
from collections import deque
import urllib.request
import requests
import os
import pickle
import shutil
import codecs
import pickle

from lxml import etree
import socks
import socket
from sockshandler import SocksiPyHandler

def mv(i, o):
    """Move path *i* to *o*, unless *o* already exists as a directory.

    When the destination directory exists the move is skipped and the
    destination path is printed instead, leaving the conflict for
    manual resolution.
    """
    if not os.path.isdir(o):
        shutil.move(i, o)
    else:
        print(o)

# Rename every sub-folder of the current directory whose name contains an
# RJ product code to "[maker] title RJxxxxxx", using metadata scraped from
# the folder's dlsite product page (fetched through a local SOCKS5 proxy).
input_path = u'.'  # scan the current working directory
dirs = [f for f in os.listdir(input_path) if os.path.isdir(os.path.join(input_path, f))]
rj_re = re.compile(r'RJ\d+')  # \d+ (was \d*): require at least one digit after "RJ"
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
           'AppleWebKit/537.36 (KHTML, like Gecko) '
           'Chrome/56.0.2924.87 Safari/537.36'}
# Proxy config never changes — hoisted out of the loop.
proxies = {'http': "socks5://127.0.0.1:1080", 'https': "socks5://127.0.0.1:1080"}
for d in dirs:
    try:
        # Raises AttributeError when the folder has no RJ code — caught below.
        match_rj = rj_re.search(d).group()
        # Product page for this RJ code.
        dlurl = 'http://www.dlsite.com/maniax/work/=/product_id/' + match_rj + '.html'
        r = requests.get(dlurl, headers=headers, proxies=proxies)
        # Parse the HTML text into an lxml Element tree.
        tree = etree.HTML(r.text)
        # The maker name is carried in the alt attribute of the maker-logo <img>.
        # NOTE(review): absolute xpaths are brittle against site redesigns — verify.
        maker_img = tree.xpath('//*[@id="work_left"]/div[2]/div[2]/div/div[1]/div[1]/ul/li[1]/img')[0]
        maker = maker_img.get('alt')
        # Work title from the page heading (was an undefined name in the original).
        # NOTE(review): confirm this selector against the live page layout.
        title = tree.xpath('//*[@id="work_name"]//a/text() | //*[@id="work_name"]/text()')[0]
        print(maker)
        output_name = '[%s] %s %s' % (maker, title, match_rj)
        # The scraped maker text sometimes carries a trailing newline — drop it.
        output_name = output_name.replace("\n", "")
        # Strip characters that are illegal in Windows file names.
        filtered_output_name = (''.join([c for c in output_name if c not in r"/\\:*?\"<>|"])).strip()
        mv(os.path.join(input_path, d), os.path.join(input_path, filtered_output_name))
    except Exception:  # narrowed from a bare except; keep best-effort per folder
        print(os.path.join(input_path, d) + ' Sorry, could not find page for this folder')

input('Press Enter to exit...')