Writing the Crawler
The overall idea of the crawler was described above. First we implement URL management as a standalone class, saved in lib/core/UrlManager.py:
# -*- coding:utf-8 -*-

class UrlManager(object):
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
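As a quick sanity check, here is a minimal sketch of how the manager is meant to be used (the URLs are made-up examples):

from lib.core.UrlManager import UrlManager

manager = UrlManager()
manager.add_new_url("https://wmathor.com/")           # seed URL (example)
manager.add_new_urls({"https://wmathor.com/about/"})  # e.g. links parsed from a page

while manager.has_new_url():
    url = manager.get_new_url()   # moves the URL from new_urls to old_urls
    print("crawling:", url)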
For convenience, the download functionality is also a standalone class, saved in lib/core/Downloader.py:
# -*- coding:utf-8 -*-
import requests

class Downloader(object):
    def get(self, url):
        r = requests.get(url, timeout=10)
        if r.status_code != 200:
            return None
        return r.text

    def post(self, url, data):
        r = requests.post(url, data)
        return r.text

    def download(self, url, htmls):
        # Used by the multithreaded crawler: each thread appends its result
        # dict to the shared `htmls` list instead of returning a value.
        if url is None:
            return None
        _str = {}
        _str["url"] = url
        try:
            r = requests.get(url, timeout=10)
            if r.status_code != 200:
                return None
            _str["html"] = r.text
        except Exception:
            return None
        htmls.append(_str)
Note that because the crawler we are writing is multithreaded, the class has a dedicated download method designed specifically for multithreaded downloading: it writes its result into a shared list rather than returning it.
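For illustration, a minimal sketch of how download() can be driven from several threads, with a shared list collecting the results (the URLs here are just examples):

import threading
from lib.core.Downloader import Downloader

downloader = Downloader()
htmls = []   # shared list; each thread appends a {"url": ..., "html": ...} dict
urls = ["https://wmathor.com/", "https://wmathor.com/about/"]   # example URLs

threads = [threading.Thread(target=downloader.download, args=(u, htmls)) for u in urls]
for t in threads:
    t.start()
for t in threads:
    t.join()   # wait for every download in this batch to finish

for page in htmls:
    print(page["url"], len(page["html"]))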
The crawler itself is written in lib/core/Spider.py:
# -*- coding:utf-8 -*-
from lib.core import Downloader, UrlManager
from script import sqlcheck
import threading
from urllib.parse import urljoin
from bs4 import BeautifulSoup

class SpiderMain(object):
    def __init__(self, root, threadNum):
        self.urls = UrlManager.UrlManager()
        self.download = Downloader.Downloader()
        self.root = root
        self.threadNum = threadNum

    def _judge(self, domain, url):
        # Only follow links that stay on the target domain
        if url.find(domain) != -1:
            return True
        return False

    def _parse(self, page_url, content):
        if content is None:
            return
        soup = BeautifulSoup(content, 'html.parser')
        _news = self._get_new_urls(page_url, soup)
        return _news

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        links = soup.find_all('a')
        for link in links:
            new_url = link.get('href')
            new_full_url = urljoin(page_url, new_url)
            if self._judge(self.root, new_full_url):
                new_urls.add(new_full_url)
        return new_urls

    def craw(self):
        self.urls.add_new_url(self.root)
        while self.urls.has_new_url():
            _content = []
            th = []
            for _ in range(self.threadNum):
                if not self.urls.has_new_url():
                    break
                new_url = self.urls.get_new_url()
                # SQL injection check on the URL just taken from the manager
                try:
                    if sqlcheck.sqlcheck(new_url):
                        print("url:%s sqlcheck is vulnerable" % new_url)
                except:
                    pass
                print("craw:" + new_url)
                t = threading.Thread(target=self.download.download, args=(new_url, _content))
                t.start()
                th.append(t)
            for t in th:
                t.join()
            for _str in _content:
                if _str is None:
                    continue
                # Use the URL stored with each page so relative links resolve correctly
                new_urls = self._parse(_str["url"], _str["html"])
                self.urls.add_new_urls(new_urls)
The crawler is started by calling the craw() method with a root URL. It downloads the pages waiting to be crawled using multiple threads, parses each downloaded page with BeautifulSoup in the _parse method, and feeds the extracted URL list back into the URL manager. This loop repeats until there are no new pages left, at which point the crawler stops.
The threading module lets you choose how many threads to start. Once launched, each thread is handed one URL to download; the main loop then blocks on join() until every thread in the batch has finished before it parses the results and continues.
Combining the Crawler with the SQL Check
In lib/core/Spider.py, add the import from script import sqlcheck, then in the craw() method call the check right where each new URL is taken out of the URL manager:
# SQL injection check
try:
    if sqlcheck.sqlcheck(new_url):
        print("url:%s sqlcheck is vulnerable" % new_url)
except:
    pass
The try/except catches any exception the check might raise and simply skips it, so one bad URL does not stop the crawl. Finally, everything is tested from the file w8ay.py:
# -*- coding:utf-8 -*-
'''
Name: w8ayScan
Author: mathor
Copyright (c) 2019
'''
import sys
from lib.core.Spider import SpiderMain

def main():
    root = "https://wmathor.com"
    threadNum = 50
    w8 = SpiderMain(root, threadNum)
    w8.craw()

if __name__ == "__main__":
    main()
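With the package layout below in place, the scan can be started from the project root with python3 w8ay.py; the target site and the thread count of 50 above are simply the values used in this article.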
One important point: so that the .py files in the lib and script folders are recognized as importable modules, create an empty __init__.py file in each of the lib, lib/core, and script directories. The files do not need any content.
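Based on the paths used in this article, the resulting project layout should look roughly like this (sqlcheck.py stands for the SQL check module in the script package):

w8ay.py
lib/
    __init__.py
    core/
        __init__.py
        UrlManager.py
        Downloader.py
        Spider.py
script/
    __init__.py
    sqlcheck.py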
Summary
SQL injection detection sends a handful of payloads that make the page error out; by comparing the original page, the "correct" page, and the "error" page, you can tell whether a SQL injection vulnerability exists.
By matching the error messages the database leaks into the page against regular expressions, you can also infer which database is in use.
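The sqlcheck module itself was covered earlier; as a reminder of the idea, here is a minimal, hypothetical sketch of such an error-based check. The payload list, the error-message patterns, and the exact logic are illustrative assumptions; only the sqlcheck name comes from this article.

# -*- coding:utf-8 -*-
# Illustrative sketch of an error-based SQL injection check,
# not the article's exact implementation.
import re
import requests

DB_ERRORS = {
    "MySQL": r"SQL syntax.*MySQL|Warning.*mysql_",
    "PostgreSQL": r"PostgreSQL.*ERROR|Warning.*pg_",
    "Microsoft SQL Server": r"Unclosed quotation mark|OLE DB Provider for SQL Server",
    "Oracle": r"ORA-\d{5}",
}

PAYLOADS = ["'", "\"", "%27", "')"]

def sqlcheck(url):
    # Append each payload to the URL and look for database error messages
    for payload in PAYLOADS:
        try:
            r = requests.get(url + payload, timeout=10)
        except Exception:
            continue
        for db, pattern in DB_ERRORS.items():
            if re.search(pattern, r.text, re.I):
                print("possible %s injection at %s" % (db, url))
                return True
    return False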