要提高Python爬虫代码的复用性,可以采取以下几种方法:
class WebScraper:
    """Template scraper: fetch a page, parse it, then persist the result.

    Subclasses (or later edits) fill in the three stage stubs; ``run``
    wires them together as a fixed fetch -> parse -> save pipeline.
    """

    def __init__(self, url):
        # Target URL this scraper instance operates on.
        self.url = url

    def fetch_content(self):
        """Retrieve the raw page content (stub — implementation goes here)."""

    def parse_content(self, content):
        """Extract structured data from *content* (stub — implementation goes here)."""

    def save_data(self, data):
        """Persist the parsed *data* (stub — implementation goes here)."""

    def run(self):
        """Execute the full pipeline: fetch, then parse, then save."""
        page = self.fetch_content()
        records = self.parse_content(page)
        self.save_data(records)
# fetch_content.py
def fetch_content(url):
    """Download and return the page content for *url* (stub — implementation goes here)."""
# parse_content.py
def parse_content(content):
    """Extract structured data from raw page *content* (stub — implementation goes here)."""
# save_data.py
def save_data(data):
    """Persist parsed *data* to storage (stub — implementation goes here)."""
from scrapy import Spider, Request
from bs4 import BeautifulSoup
class MySpider(Spider):
    """Example Scrapy spider that hands each response to BeautifulSoup."""

    name = 'myspider'
    start_urls = ['http://example.com']

    def parse(self, response):
        """Parse one downloaded page; extraction logic goes below."""
        soup = BeautifulSoup(response.text, 'html.parser')
        # Content-extraction code goes here (use `soup` to query the DOM).
# config.ini
[settings]
user_agent = My User Agent
headers = {'X-Custom-Header': 'My Value'}
proxies = {'http': 'http://proxy.example.com:8080', 'https': 'https://proxy.example.com:8080'}
def get_response(url, headers=None, proxies=None):
    """Send an HTTP request to *url*, optionally with custom headers/proxies.

    Stub — the request-sending implementation goes here.
    """
def get_soup(response, parser='html.parser'):
    """Build a parsed document tree from *response* using *parser*.

    Stub — the parsing implementation goes here.
    """
通过以上方法,可以提高Python爬虫代码的复用性,使代码更加模块化和易于维护。