This article is taking part in the "Python Theme Month" event; see the event link for details.

Before we start, we need to get the environment properly set up.
Since Docker is already available locally, we can simply pull the JupyterHub image, run it locally, and write all of the code in JupyterHub. The default account is jovyan, and the password is hipaddle. (The password can be checked inside the container at /etc/jupyter/jupyterhub_config.py.)

All of the resources we are going to fetch come from 电影天堂 (dytt8.net). Before writing any code we need to get familiar with the pages and inspect the elements in the page source, because everything below is extracted by parsing those elements out of the raw HTML.
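As a warm-up, here is a minimal sketch of that approach, using the same requests and lxml libraries as the final spider; the list-page URL and the XPath expression are the ones the full code uses later:

import requests
from lxml import etree

# Fetch one movie-list page and print the relative detail-page links
url = 'http://www.dytt8.net/html/gndy/dyzz/list_23_2.html'
headers = {'User-Agent': 'Mozilla/5.0'}
response = requests.get(url, headers=headers)
html_element = etree.HTML(response.text)
# Every movie row sits inside a table with class "tbspan"
for href in html_element.xpath('//table[@class="tbspan"]//a/@href')[:5]:
    print(href)  # relative paths such as /html/gndy/dyzz/...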
Flipping through a few pages of the movie list reveals the pattern in the URL: only the page index changes. On each list page we locate the movie elements one by one, record each of them, and move on to the next. The paging loop looks like this (a quick check of the URL pattern follows the snippet):

for index in range(2, 3):
    # 1. URL of the movie list page
    url = base_url.format(index)
    # 2. Get the [detail addresses] of all movies on the current page
    detail_urls = get_detail_urls(url)
    # 3. Parse the detail page of each movie
    for key, detail_url in enumerate(detail_urls):
        # print('Index: ' + str(key) + ', address: ' + detail_url)
        # print('Parsing detail page: ' + detail_url)
        film = parse_detail_page(detail_url)
        films.append(film)
    # 4. Sleep for 1 second after each page is crawled
    time.sleep(1)
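As promised, a quick sanity check of the URL pattern; base_url here is the same template defined in the full code below:

base_url = 'http://www.dytt8.net/html/gndy/dyzz/list_23_{}.html'
# Each list page differs only in its index, so format() generates them all
for index in range(2, 5):
    print(base_url.format(index))
# -> http://www.dytt8.net/html/gndy/dyzz/list_23_2.html
# -> http://www.dytt8.net/html/gndy/dyzz/list_23_3.html
# -> http://www.dytt8.net/html/gndy/dyzz/list_23_4.html

Inside the detail-page parser, the title and the images are pulled out with two XPath queries: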
......
# [Data - movie title]
title = html_element.xpath('//div[@class="title_all"]//font[@color="#07519a"]/text()')[0]
# Grab the Zoom tag
zoom_element = html_element.xpath('//div[@id="Zoom"]')[0]
# [Data - movie cover and screenshots]
imgs = zoom_element.xpath(".//img/@src")
......
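To make those two XPath queries concrete, here is a self-contained sketch against a tiny hand-written fragment (the fragment is invented for illustration; real detail pages are much larger):

from lxml import etree

# A hand-written fragment mimicking the structure of a real detail page
html = ('<div class="title_all"><font color="#07519a">示例电影</font></div>'
        '<div id="Zoom">'
        '<img src="http://example.com/cover.jpg"/>'
        '<img src="http://example.com/shot.jpg"/>'
        '</div>')
element = etree.HTML(html)
title = element.xpath('//div[@class="title_all"]//font[@color="#07519a"]/text()')[0]
zoom = element.xpath('//div[@id="Zoom"]')[0]
print(title)                      # 示例电影
print(zoom.xpath('.//img/@src'))  # ['http://example.com/cover.jpg', 'http://example.com/shot.jpg']

Every field parsed out of the page ends up in a single dict: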
film = {
    'title': title,
    'cover': cover,
    'screen_shot': screen_shot,
    'year': year,
    'country': country,
    'type': film_type,
    'rating': rating,
    'duration': duration,
    'director': director,
    'actors': actors,
    'desc': desc,
    'download_url': download_url
}
......
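Since each film is a plain dict of strings and lists, persisting the results is straightforward. A minimal sketch, assuming you want JSON output (the save_films helper and file name are my own additions, not part of the original script):

import json

def save_films(films, path='films.json'):
    # ensure_ascii=False keeps the Chinese titles human-readable in the file
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(films, f, ensure_ascii=False, indent=2)

The complete spider code: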
import requests
from lxml import etree
import time

# Base address of the site
BASE_DOMAIN = 'http://www.dytt8.net'

HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36',
}
def get_detail_urls(url):
    response = requests.get(url, headers=HEADERS)
    html_element = etree.HTML(response.text)
    # [Data - list of strings] detail page addresses
    detail_urls = html_element.xpath('//table[@class="tbspan"]//a/@href')
    # Filter out the dirty data introduced by the "综合电影" entry.
    # Iterate over a copy: removing items from the list being iterated
    # over would silently skip elements.
    for detail_url in detail_urls[:]:
        if detail_url == '/html/gndy/jddy/index.html':
            detail_urls.remove(detail_url)
    # Assemble the full detail page addresses
    detail_urls = map(lambda x: BASE_DOMAIN + x, detail_urls)
    return detail_urls
def parse_detail_page(detail_url):
    response = requests.get(detail_url, headers=HEADERS)
    # The site is served in a GBK-family encoding; gb18030 is a superset of it
    text = response.content.decode('gb18030')
    html_element = etree.HTML(text)
    # [Data - movie title]
    title = html_element.xpath('//div[@class="title_all"]//font[@color="#07519a"]/text()')[0]
    # Grab the Zoom tag
    zoom_element = html_element.xpath('//div[@id="Zoom"]')[0]
    # [Data - movie cover and screenshots]
    imgs = zoom_element.xpath(".//img/@src")
    # Note: initialize every field up front so dirty data cannot crash the app
    year, country, film_type, rating, duration, director, actors, cover, screen_shot, desc, download_url = '', '', '', '', '', '', '', '', '', '', ''
    if len(imgs) > 0:
        cover = imgs[0]
    # [Data - movie screenshot]
    if len(imgs) > 1:
        screen_shot = imgs[1]
    # Get all the text under div[@id='Zoom'] (the text of all descendants)
    infos = zoom_element.xpath('.//text()')

    # Strip the field label from a line of info
    def parse_info(info, rule):
        return info.replace(rule, '').strip()
    # Walk through every line of infos and pick out the useful fields
    for key, info in enumerate(infos):
        # print('Processing item {}'.format(key))
        # print(info)
        # print('End ==================================================')
        if info.startswith('◎年 代'):
            # Year
            year = parse_info(info, '◎年 代')
        elif info.startswith('◎产 地'):
            # Country of origin
            country = parse_info(info, '◎产 地')
        elif info.startswith('◎类 别'):
            # Genre
            film_type = parse_info(info, '◎类 别')
        elif info.startswith('◎豆瓣评分'):
            # Douban rating
            rating = parse_info(info, '◎豆瓣评分')
        elif info.startswith('◎片 长'):
            # Duration
            duration = parse_info(info, '◎片 长')
        elif info.startswith('◎导 演'):
            # Director
            director = parse_info(info, '◎导 演')
        elif info.startswith('◎主 演'):
            # Actors (the first actor is on the same line as the label)
            actor_first = parse_info(info, '◎主 演')
            actors = [actor_first]
            # The remaining actors each occupy a line of their own
            for index in range(key + 1, len(infos)):
                item = infos[index].strip()
                if item.startswith('◎简 介'):
                    break
                actors.append(item)
        elif info.startswith('◎简 介'):
            # desc = parse_info(info, '◎简 介')
            # The synopsis occupies the lines up to the download section
            for index in range(key + 1, len(infos)):
                item = infos[index].strip()
                if item.startswith('【下载地址】'):
                    break
                desc = item
            # print(detail_url)
    # Download address: usually inside an <a> tag, occasionally plain text
    if len(html_element.xpath('//td[@bgcolor="#fdfddf"]/a/text()')) > 0:
        download_url = html_element.xpath('//td[@bgcolor="#fdfddf"]/a/text()')[0]
    elif len(html_element.xpath('//td[@bgcolor="#fdfddf"]/text()')) > 0:
        download_url = html_element.xpath('//td[@bgcolor="#fdfddf"]/text()')[0]
    film = {
        'title': title,
        'cover': cover,
        'screen_shot': screen_shot,
        'year': year,
        'country': country,
        'type': film_type,
        'rating': rating,
        'duration': duration,
        'director': director,
        'actors': actors,
        'desc': desc,
        'download_url': download_url
    }
    return film
def spider():
    base_url = 'http://www.dytt8.net/html/gndy/dyzz/list_23_{}.html'
    films = []
    for index in range(2, 3):
        url = base_url.format(index)
        detail_urls = get_detail_urls(url)
        for key, detail_url in enumerate(detail_urls):
            # print('Index: ' + str(key) + ', address: ' + detail_url)
            # print('Parsing detail page: ' + detail_url)
            film = parse_detail_page(detail_url)
            films.append(film)
        # Sleep for 1 second after each page is crawled
        time.sleep(1)
    print(films)


if __name__ == '__main__':
    spider()
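One last practical note: against the live site, timeouts and the occasional malformed page are to be expected. A hedged way to harden the run, not part of the original script, is a wrapper that skips failing pages instead of aborting the whole crawl (the parse_detail_page_safe name is my own):

def parse_detail_page_safe(detail_url):
    # Return None for pages that fail, rather than crashing the spider;
    # IndexError covers the xpath(...)[0] lookups on malformed pages
    try:
        return parse_detail_page(detail_url)
    except (requests.RequestException, IndexError) as e:
        print('Skipping {}: {}'.format(detail_url, e))
        return None

Passing timeout=10 to the requests.get calls would additionally guard against hung connections.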