# 【2023python爬虫1000集】目前B站最完整的爬虫教程,包含所有干货内容
#
# 屠戮电影天堂(结合老师视频代码,又通过GPT优化了代码,jupyter notebook内可以正常运行)
import requests
import re
import csv
import pandas as pd
# Scrape the dytt89 "2023 must-see movies" list: collect each film's title and
# download link from its detail page, then save everything to movies.csv.

# Browser-like User-Agent so the site does not reject the request as a bot.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.62"
}

# Fetch the home page. A timeout keeps a dead/slow server from hanging the script.
url = "https://www.dytt89.com"
response = requests.get(url, headers=headers, timeout=10)
# The site declares gb2312, but pages often contain characters that are only
# valid in gbk (a superset of gb2312); decoding as gbk avoids UnicodeDecodeError.
response.encoding = 'gbk'

# obj1: isolate the <ul> block that follows the "2023必看热片" heading.
# obj2: pull each (quoted, relative) href out of that <ul>.
# obj3: on a detail page, capture the film title and the download link cell.
obj1 = re.compile(r'2023必看热片.*?<ul>(?P<ul>.*?)</ul>', re.S)
obj2 = re.compile(r'<a href=(?P<href>.*?) ', re.S)
obj3 = re.compile(r'◎片 名(?P<movie>.*?)<br />.*?<td '
                  r'style="WORD-WRAP: break-word" bgcolor="#fdfddf"><a href="(?P<download>.*?)">', re.S)

# Build the absolute URLs of the per-movie detail pages.
child_href_list = []
for it in obj1.finditer(response.text):
    ul = it.group('ul')
    for itt in obj2.finditer(ul):
        # The capture looks like '/i/108499.html' (single quotes included);
        # strip the quotes and leading slash, then join with an explicit "/"
        # so the result does not depend on the order of the strip() calls.
        path = itt.group('href').strip("'").strip("/")
        child_href_list.append(url + "/" + path)

# Visit each detail page and extract the movie name and download link.
movies = []
for href in child_href_list:
    # Reuse the same headers (and a timeout) as the main request for consistency.
    child_response = requests.get(href, headers=headers, timeout=10)
    child_response.encoding = 'gbk'
    result3 = obj3.search(child_response.text)
    if result3 is None:
        # Detail page has a different layout (e.g. a removed film) —
        # skip it instead of crashing on None.group().
        continue
    movies.append({
        '电影': result3.group('movie').strip(),
        '下载链接': result3.group('download').strip(),
    })

# Persist the results; utf-8-sig writes a BOM so Excel opens the CSV correctly.
df = pd.DataFrame(movies)
filename = 'movies.csv'
df.to_csv(filename, index=False, encoding='utf-8-sig')
print(f"电影信息已成功保存到 {filename} 文件中。")