# Basic usage of Scrapy; target URL: https://movie.douban.com/top250

# Create a Scrapy project named douban

scrapy startproject douban

# Change into the project directory

cd douban

# Generate a spider named db

scrapy genspider db https://movie.douban.com/top250
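After these two commands, the generated project typically has the following layout (these are Scrapy's default file names; db.py is the spider created by genspider above):

douban/
    scrapy.cfg            # deployment configuration
    douban/
        __init__.py
        items.py          # item definitions
        middlewares.py    # spider / downloader middlewares
        pipelines.py      # item pipelines
        settings.py       # project settings
        spiders/
            __init__.py
            db.py         # the spider generated above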

## Code auto-generated by Scrapy; parts of it are modified below. #db.py

# Import the required modules
import scrapy
from ..items import DoubanItem


class DbSpider(scrapy.Spider):
    name = "db"
    # Domains the spider is allowed to crawl
    allowed_domains = ["movie.douban.com"]
    # Crawl 5 pages: the first page starts at 0, the second at 25, and so on (50, 75, 100)
    start_urls = [f"https://movie.douban.com/top250?start={i}&filter=" for i in range(0, 101, 25)]

    # response is the downloaded content of each request
    def parse(self, response):
        # Parse the page with XPath: one <li> per movie in the ranking list
        for i in response.xpath('//ol[@class="grid_view"]/li'):
            title = i.xpath('./div/div[2]/div/a/span/text()').extract_first()
            print(title)
            # Fill and yield an item (assumes a "title" field is declared in items.py, see the sketch below)
            douban = DoubanItem()
            douban["title"] = title
            yield douban
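The spider imports DoubanItem, so a matching field has to be declared in items.py. Below is a minimal sketch; the single "title" field is an assumption made here to hold the movie title extracted in db.py, not something generated by Scrapy:

#items.py

import scrapy


class DoubanItem(scrapy.Item):
    # Assumed field name: stores the movie title extracted in db.py
    title = scrapy.Field()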

# Next, configure the project and request settings #settings.py

BOT_NAME = "douban"

SPIDER_MODULES = ["douban.spiders"]

NEWSPIDER_MODULE = "douban.spiders"

USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0"

ROBOTSTXT_OBEY = False

REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"

TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"

FEED_EXPORT_ENCODING = "utf-8"
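With the settings in place, run the crawl from the project directory and let Scrapy's feed export write the items to a file (the output file name here is arbitrary):

#Run the spider and export the scraped items
scrapy crawl db -o top250.json

FEED_EXPORT_ENCODING = "utf-8" keeps the Chinese titles readable in the exported file instead of being escaped.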
