# from POM.aaa import A
#
#
# def fb():
#     print('你好')
#
# A().fa()
# fb()
import random
import re
from random import randint

# Lottery (彩票)

import requests
import xlsxwriter as xlsxwriter

# print(random.randint(1,21),random.randint(1,21),random.randint(1,31),random.randint(1,31),random.randint(1,31),random.randint(1,41),random.randint(1,21))



# import xlwt
# workbook = xlwt.Workbook()
# # 创建工作表worksheet,填入表名
# worksheet = workbook.add_sheet('sheet1')
# # 在表中写入相应的数据
#                #行 列  内容
# worksheet.write(0, 0, '室')
# worksheet.write(1, 0, '房间')
# worksheet.write(0, 1, '厅')
# worksheet.write(0, 2, '卫')
# worksheet.write(0, 3, '单价/平方米')
# worksheet.write(0, 4, '总价/万元')
# workbook.save(r'D:\ui\POM\hello1.xls')   #保存并命名


# workbook = xlsxwriter.Workbook(r'D:\ui\POM\angle_table.xlsx')
# worksheet = workbook.add_worksheet()
# worksheet.write(0, 0, '电影名称')  # 第i行0列
# worksheet.write(0, 1, '迅雷链接') # 第i行1列
#
# workbook.close()



# page_content = r'D:\ui\POM\1314.txt'
# with open(page_content,'r',encoding='utf-8') as f:
#     a = f.read()
#
# obj = re.compile(r'http://(?P<url>.*?)',re.S)
# url_list = obj.finditer(a)
#
# for i in url_list:
#     print(i.group('url'))

# for i in range(0, 3 ) :
#     if i == 1 :
#         # 忽略本次循环的剩下语句
#         continue
#     print(i)

# a = [1,2]
# for i in [1,2,2,2,3,3,4]:
#     if i in a:
#         continue
#     a.append(i)
#     print(i)
# print(a)

# p_url = r'"(http.*)"'
# r_url = re.compile(p_url)

# page_content = r'D:\ui\POM\1314.txt'
# with open(page_content,'r',encoding='utf-8') as f:
#     for line in f.readlines():
#         m_url = r_url.search(line)
#         if m_url is not None:
#             url = m_url.group(1)
#             print(url)


#
# for i in range(0,226,25):
#     print(i)



# import requests
# import xlsxwriter
# from lxml import etree
# from bs4 import BeautifulSoup
# from concurrent.futures import ThreadPoolExecutor
#
#
# # def test_01()
#
#
# def download_one_page(url, d, b, worksheet):
#     headers = {
#         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
#     }
#     res = requests.get(url, headers=headers)
#     res.encoding = 'utf-8'
#
#     # 拿到电影名字
#     main_page = BeautifulSoup(res.text, 'html.parser')  # 指定HTML解析器
#     alist = main_page.find('ol', class_="grid_view").find_all('img')
#
#     '''
#     处理数据 把数据里面的'//' '\' 或者别的去掉
#     alist_s = (al.replace('//','').replace("\",'') for al in alist)
#     '''
#     x = d
#     for a in alist:
#         name = a.get('alt')
#         y = x + 1
#         worksheet.write(y, 0, name)
#         x += 1
#
#     html = etree.HTML(res.text)
#     ol = html.xpath('//*[@id="content"]/div/div[1]/ol')[0]
#     lis = ol.xpath('./li')
#
#     w = b
#     # 拿到每个li
#     for li in lis:
#         # 拿到电影评分
#         score = li.xpath('./div/div[2]/div[2]/div/span[2]/text()')[0]
#         # 拿到电影的描述
#         try:
#             describe = li.xpath('./div/div[2]/div[2]/p[2]/span/text()')[0]
#         except:
#             describe = None
#         # /html/body/div[3]/div[1]/div/div[1]/ol/li[1]/div/div[2]/div[2]/p[2]/span
#         # print(describe)
#         j = w + 1
#         worksheet.write(j, 1, score)
#         worksheet.write(j, 2, describe)
#         w += 1
#         # print(describe)
#
#     print(url, '提取完毕!')
#     res.close()
#     return y, j
#
#
#
# if __name__ == '__main__':
#     workbook = xlsxwriter.Workbook(r'D:\ui\reptile\data\douban_top250_data.xlsx')
#     worksheet = workbook.add_worksheet()
#     worksheet.write(0, 0, '电影名称')  # 第i行0列
#     worksheet.write(0, 1, '电影评分')  # 第i行1列
#     worksheet.write(0, 2, '电影描述')
#     a = 0
#     # 标记
#     b = 0
#     for i in range(0, 126, 25):
#         lis = download_one_page(f'https://movie.douban.com/top250?start={i}&filter=', a, b, worksheet)
#         a = lis[0]
#         b = lis[1]
#         # print(a, b)
#     workbook.close()

# url = 'http://kr.shanghai-jiuxin.com/file/2022/0414/9e5827678bd12db0999a573254e40d1e.jpg'
# name = url.strip('/')
# print(name)
# from lxml import etree
#
# headers = {
#         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36"
#     }
# res = requests.get('https://www.qbiqu.com/79_79354/44787739.html',headers=headers)
# res.encoding = 'gbk'
# page_content = res.text
# selector = etree.HTML(page_content)
# content = selector.xpath('//*[@id="content"]/text()')
# novel_content = ''.join(content)
# novel_content_s = novel_content.replace(u'\xa0','').replace(' ','').replace('\n','')
# with open(r'D:\ui\reptile\coroutines\data\1253.txt','w',encoding='utf-8') as f:
#     f.write(novel_content_s)
#
# res.close()
# obj = re.compile(r'&nbsp;&nbsp;&nbsp;&nbsp;(?P<novel_content>.*?) <', re.S)
# result = obj.finditer(page_content)
# for i in result:
#     title = i.group('novel_content')
#     print(title)


# Demo: extract the final path segment (the filename) from a URL path.
a = '/79_79354/45084133.html'
# Splitting on every '/' and taking the last piece yields the basename.
b = a.split('/')[-1]
print(b)

