Writing a Python web crawler can be broken down into the following steps.

1. Install the third-party libraries the crawler depends on:
pip install requests
pip install beautifulsoup4
pip install lxml
2. Import the required modules (csv is needed for the storage step below, and lxml's etree for the XPath example):

import requests
from bs4 import BeautifulSoup
import re
import csv
from lxml import etree
3. Send an HTTP request and fetch the page content:

url = 'https://example.com'
response = requests.get(url)
html_content = response.text
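In practice it is often worth setting a User-Agent header and a timeout so the request does not hang indefinitely. A minimal sketch; the header value here is only an illustrative example:

headers = {'User-Agent': 'Mozilla/5.0 (compatible; MyCrawler/1.0)'}
response = requests.get(url, headers=headers, timeout=10)
html_content = response.text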
4. Parse the HTML with BeautifulSoup, using the lxml parser:

soup = BeautifulSoup(html_content, 'lxml')
5. Extract the information you need; common approaches are regular expressions, XPath, and CSS selectors:

# Extract the page title with a regular expression, guarding against a missing match
title_match = re.search(r'<title>(.*?)</title>', html_content)
title = title_match.group(1) if title_match else ''

# Extract all link targets with XPath; BeautifulSoup itself does not
# support XPath, so lxml's etree is used directly
tree = etree.HTML(html_content)
links = tree.xpath('//a/@href')

# Extract all paragraphs with a CSS selector (BeautifulSoup's method is select(), not cssselect())
paragraphs = soup.select('p')
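If you prefer to stay within BeautifulSoup rather than pulling in etree, the same links can be collected without XPath; a small sketch:

# Collect the href attribute of every <a> tag that has one
links = [a.get('href') for a in soup.find_all('a') if a.get('href')]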
6. Store the extracted data, for example in a CSV file:

# Write the title/link pairs to a CSV file
with open('output.csv', 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['Title', 'Link'])
    for link in links:
        writer.writerow([title, link])
7. To crawl multiple pages, wrap the request, parsing, and extraction logic in a function and loop over a list of URLs (a fleshed-out sketch follows this step):

def crawl(url):
    # Send the HTTP request, parse the HTML, and extract the information here
    pass

urls = ['https://example.com/page1', 'https://example.com/page2']
for url in urls:
    crawl(url)
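As a concrete illustration, here is one way crawl() could tie the previous steps together. This is only a sketch; the extraction logic and the output file name (output.csv) are assumptions carried over from the earlier examples:

import csv
import requests
from bs4 import BeautifulSoup

def crawl(url):
    # Fetch the page and raise on HTTP errors
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'lxml')
    # Pull out the title and every link target on the page
    title = soup.title.string if soup.title else ''
    links = [a.get('href') for a in soup.find_all('a') if a.get('href')]
    # Append one title/link row per link (assumed file name: output.csv)
    with open('output.csv', 'a', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        for link in links:
            writer.writerow([title, link])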
8. Throttle your requests so the target site is not overloaded, for example by pausing between pages:

import time

for url in urls:
    crawl(url)
    time.sleep(5)  # pause for 5 seconds between requests
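To spread the load more evenly, the fixed delay is sometimes replaced with a randomized one; a small sketch using the standard library's random module:

import random
import time

for url in urls:
    crawl(url)
    time.sleep(random.uniform(3, 7))  # random pause between 3 and 7 seconds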
9. Handle request failures so one bad URL does not crash the whole crawler:

try:
    response = requests.get(url)
    response.raise_for_status()  # raise an exception for 4xx/5xx status codes
except requests.exceptions.RequestException as e:
    print(f"Error: {e}")
That covers the basic process of writing a Python crawler. Keep in mind that a crawler should honor the target site's robots.txt rules, respect its copyright and privacy policies, and avoid sending requests so frequently that they put a burden on the site.
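For the robots.txt compliance mentioned above, the standard library's urllib.robotparser can check whether a given URL may be fetched; a minimal sketch (the user-agent name is an assumption):

from urllib.robotparser import RobotFileParser

rp = RobotFileParser()
rp.set_url('https://example.com/robots.txt')
rp.read()
# Only crawl the page if robots.txt allows it for our user agent
if rp.can_fetch('MyCrawler', 'https://example.com/page1'):
    crawl('https://example.com/page1')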