
Update a PyQt5 GUI in the main thread based on signals from scrapy

谢麒
2023-03-14
Question

I have a very basic spider that looks like the followall spider from scrapy's testspiders.

import re

import scrapy.signals
from scrapy.http import Request, HtmlResponse
from scrapy.linkextractors import LinkExtractor
from six.moves.urllib.parse import urlparse

from page import Page


class ZenSpider( scrapy.Spider ) :
    name = 'followall'
    custom_settings = {
        'CLOSESPIDER_PAGECOUNT' : 2,
        "FEEDS" : {
            "items.csv" : {"format" : "csv"},
        },
    }

    def __init__(self, **kw) :
        super( ZenSpider, self ).__init__( **kw )
        url = kw.get( 'url' ) or kw.get( 'domain' ) or 'http://scrapinghub.com/'
        if not url.startswith( 'http://' ) and not url.startswith( 'https://' ) :
            url = 'http://%s/' % url
        self.url = url
        self.allowed_domains = [re.sub(r'^www\.', '', urlparse(url).hostname)]
        self.link_extractor = LinkExtractor()

    def start_requests(self):
        return [Request(self.url, callback=self.parse, dont_filter=True)]

    def parse(self, response):
        """Parse a PageItem and all requests to follow

        @url http://www.scrapinghub.com/
        @returns items 1 1
        @returns requests 1
        @scrapes url title foo
        """
        page = self._get_item(response)
        r = [page]
        r.extend(self._extract_requests(response))
        return r

    def _get_item(self, response):
        items = []
        item = Page(
            url=response.url,
            size=str( len( response.body ) ),
            status=response.status,
            # content_type=response.request.headers.get('Content-Type'),
            # encoding=response.request.headers.get('encoding'),
            # referer=response.request.headers.get('Referer'),
        )
        self._set_title( item, response )
        self._set_description( item, response )
        return item

    def _extract_requests(self, response):
        r = []
        if isinstance(response, HtmlResponse):
            links = self.link_extractor.extract_links( response )
            r.extend( Request( x.url, callback=self.parse ) for x in links )
        return r

    def _set_title(self, page, response) :
        if isinstance( response, HtmlResponse ) :
            title = response.xpath( "//title/text()" ).extract()
            if title :
                page['title'] = title[0]

    def _set_description(self, page, response) :
        if isinstance( response, HtmlResponse ) :
            description = response.xpath( "//meta[@name='description']/@content" ).extract()
            if description :
                page['description'] = description[0]

I call this spider from the script below. The spider is run with the CrawlerRunner class, and p.signals.connect hooks the item_scraped signal up to the crawler_results method, which is called and prints each item as it is scraped.

As far as I can tell, I cannot move the crawl into its own class, because then the signal will not work with PyQt5.

import scrapy
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import QRunnable, pyqtSlot, QThread, pyqtSignal, QTimer
from PyQt5.QtWidgets import QTableWidgetItem, QLabel
from scrapy import signals
from scrapy.crawler import CrawlerProcess, CrawlerRunner
from twisted.internet import reactor
from scrapy.utils.log import configure_logging

from Layout import Ui_MainWindow
from ZenSpider import ZenSpider


class MainWindow( QtWidgets.QMainWindow, Ui_MainWindow ) :

    def __init__(self, parent=None) :
        super(MainWindow, self).__init__()

        self.setupUi( self )
        self.pushButton.pressed.connect( self.on_url_entered )

    def crawler_results(self, item) :
        print( "SCRAPED AN ITEM" )
        ##Do Something here ##

    def on_url_entered(self) :
        # global userInput
        # userInput = self.urlbar.text()
        configure_logging()
        runner = CrawlerRunner()
        runner.crawl(ZenSpider, domain="google.com.au")
        for p in runner.crawlers :
            p.signals.connect(self.crawler_results, signal=signals.item_scraped)
        reactor.run()

if __name__ == "__main__" :
    app = QtWidgets.QApplication( [] )
    main_window = MainWindow()
    main_window.show()
    app.exec_()

I have a layout with a simple QTableWidget and a button.

# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'basic.ui'
#
# Created by: PyQt5 UI code generator 5.14.2
#
# WARNING! All changes made in this file will be lost!


from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1034, 803)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setGeometry(QtCore.QRect(140, 200, 831, 401))
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(1)
        self.tableWidget.setRowCount(0)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(880, 610, 89, 25))
        self.pushButton.setObjectName("pushButton")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "URL"))
        self.pushButton.setText(_translate("MainWindow", "Start"))


if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())

When I press the button I can see the crawler running, and it prints each item as it enters the crawler_results method. The spider returns every item with values like these:

{'size': '164125',
 'status': 200,
 'title': 'Google Advanced Search',
 'url': 'https://www.google.com.au/advanced_search?hl=en-AU&authuser=0'}

Page is simply my scrapy item:

import scrapy

class Page(scrapy.Item):
    url = scrapy.Field()
    size = scrapy.Field()
    status = scrapy.Field()
    title = scrapy.Field()

My question is how I can get this data into the GUI and have it refresh automatically while the spider is running, i.e. every time an item is scraped the GUI updates and the crawl then carries on.

So far I have explored:

  1. Twisted deferreds, with no luck.
  2. Slots/signals, but I could not get the GUI to update (a sketch of this idea follows this list).
  3. A QTimer that refreshes the GUI every second, but again with no result.
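
A minimal sketch of the slots/signals idea from point 2 would be something like the following (ItemBridge and its wiring are only illustrative, not the code above):

from PyQt5.QtCore import QObject, pyqtSignal


class ItemBridge(QObject):
    # Re-emits scrapy's item_scraped callback as a Qt signal so a GUI slot can react.
    item_scraped = pyqtSignal(object)

    def forward(self, item, response, spider):
        # Signature matches the arguments sent with scrapy's item_scraped signal.
        self.item_scraped.emit(item)


# Intended wiring, e.g. inside MainWindow:
#   self.bridge = ItemBridge()
#   self.bridge.item_scraped.connect(self.crawler_results)
#   ...
#   p.signals.connect(self.bridge.forward, signal=signals.item_scraped)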

Any help is much appreciated.


Answer:

You have to install a reactor that is compatible with the Qt event loop, for example using:

  • qt5reactor (python -m pip install qt5reactor), or
  • qt-reactor (python -m pip install qt-reactor):

    import sys

    from PyQt5 import QtWidgets, QtCore, QtGui

    import qt5reactor

    from scrapy import signals
    from scrapy.crawler import CrawlerRunner
    from scrapy.utils.log import configure_logging

    import twisted

    from Layout import Ui_MainWindow
    from ZenSpider import ZenSpider


    class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
        def __init__(self, parent=None):
            super(MainWindow, self).__init__()

            self.setupUi(self)
            self.pushButton.pressed.connect(self.on_url_entered)
            self.tableWidget.horizontalHeader().setSectionResizeMode(
                QtWidgets.QHeaderView.ResizeToContents
            )

        def crawler_results(self, item):
            # called for every item_scraped signal: append the url as a new row
            row = self.tableWidget.rowCount()
            url = item["url"]

            it = QtWidgets.QTableWidgetItem(url)
            self.tableWidget.insertRow(row)
            self.tableWidget.setItem(row, 0, it)

        def on_url_entered(self):
            # the reactor is already running, so this only schedules the crawl
            configure_logging()
            runner = CrawlerRunner()
            runner.crawl(ZenSpider, domain="google.com.au")
            for p in runner.crawlers:
                p.signals.connect(self.crawler_results, signal=signals.item_scraped)

        def closeEvent(self, event):
            # stop the shared reactor when the window is closed
            super(MainWindow, self).closeEvent(event)
            twisted.internet.reactor.stop()


    if __name__ == "__main__":
        app = QtWidgets.QApplication([])

        qt5reactor.install()  # must be called before twisted.internet.reactor is used
        # qreactor.install()  # the qt-reactor equivalent

        main_window = MainWindow()
        main_window.show()
        twisted.internet.reactor.run()


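The essential detail is the ordering: the Qt-aware reactor is installed after the QApplication has been created but before twisted.internet.reactor is imported or used anywhere, and reactor.run() then drives both the Twisted and the Qt event loops, so app.exec_() is not called at all. A stripped-down sketch of just that ordering (assuming qt5reactor; the QLabel merely stands in for the real MainWindow):

    import sys

    from PyQt5 import QtWidgets

    import qt5reactor

    app = QtWidgets.QApplication(sys.argv)   # 1. create the Qt application first
    qt5reactor.install()                     # 2. install the Qt-aware Twisted reactor

    from twisted.internet import reactor     # 3. only import the reactor after install()

    window = QtWidgets.QLabel("placeholder")  # stands in for the real MainWindow
    window.show()

    app.lastWindowClosed.connect(reactor.stop)  # shut down cleanly when the window closes
    reactor.run()                            # 4. one call runs both event loops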