
Combining a Crawler with sqlmapapi to Detect SQL Injection


Recently I've been tinkering with something tedious: a crawler plus a scanner, with the scanning handed off to sqlmapapi. There isn't a lot of material on this yet, but you can still find some, for example:

《利用sqlmapapi.py批量化扫描实践》 (Using sqlmapapi.py for batch scanning in practice): http://drops.wooyun.org/tips/6653

Here is the class it wraps around sqlmapapi:

#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import time
import json


class AutoSqli(object):
    """
    Uses the sqlmapapi methods to interact with a running sqlmapapi server.
    By Manning
    """

    def __init__(self, server='', target='', data='', referer='', cookie=''):
        super(AutoSqli, self).__init__()
        self.server = server
        if self.server[-1] != '/':
            self.server = self.server + '/'
        self.target = target
        self.taskid = ''
        self.engineid = ''
        self.status = ''
        self.data = data
        self.referer = referer
        self.cookie = cookie
        self.start_time = time.time()

    # Create a new scan task
    def task_new(self):
        self.taskid = json.loads(
            requests.get(self.server + 'task/new').text)['taskid']
        print 'Created new task: ' + self.taskid
        # Everything else is driven by this taskid
        if len(self.taskid) > 0:
            return True
        return False

    # Delete a scan task
    def task_delete(self):
        if json.loads(requests.get(self.server + 'task/' + self.taskid + '/delete').text)['success']:
            print '[%s] Deleted task' % (self.taskid)
            return True
        return False

    # Start the scan task
    def scan_start(self):
        headers = {'Content-Type': 'application/json'}
        # The URL to be scanned
        payload = {'url': self.target}
        url = self.server + 'scan/' + self.taskid + '/start'
        # e.g. http://127.0.0.1:8557/scan/xxxxxxxxxx/start
        t = json.loads(
            requests.post(url, data=json.dumps(payload), headers=headers).text)
        self.engineid = t['engineid']
        if len(str(self.engineid)) > 0 and t['success']:
            print 'Started scan'
            return True
        return False

    # Status of the scan task
    def scan_status(self):
        self.status = json.loads(
            requests.get(self.server + 'scan/' + self.taskid + '/status').text)['status']
        if self.status == 'running':
            return 'running'
        elif self.status == 'terminated':
            return 'terminated'
        else:
            return 'error'

    # Results of the scan task
    def scan_data(self):
        self.data = json.loads(
            requests.get(self.server + 'scan/' + self.taskid + '/data').text)['data']
        if len(self.data) == 0:
            print 'not injection:\t'
        else:
            print 'injection:\t' + self.target

    # Scan options; setting the parameters is the important part
    def option_set(self):
        headers = {'Content-Type': 'application/json'}
        option = {"options": {
                      "smart": True,
                      ...  # further options elided in the original post
                  }}
        url = self.server + 'option/' + self.taskid + '/set'
        t = json.loads(
            requests.post(url, data=json.dumps(option), headers=headers).text)
        print t

    # Stop the scan task
    def scan_stop(self):
        json.loads(
            requests.get(self.server + 'scan/' + self.taskid + '/stop').text)['success']

    # Kill the scan task process
    def scan_kill(self):
        json.loads(
            requests.get(self.server + 'scan/' + self.taskid + '/kill').text)['success']

    def run(self):
        if not self.task_new():
            return False
        self.option_set()
        if not self.scan_start():
            return False
        while True:
            if self.scan_status() == 'running':
                time.sleep(10)
            elif self.scan_status() == 'terminated':
                break
            else:
                break
            print time.time() - self.start_time
            if time.time() - self.start_time > 3000:
                error = True
                self.scan_stop()
                self.scan_kill()
                break
        self.scan_data()
        self.task_delete()
        print time.time() - self.start_time


if __name__ == '__main__':
    t = AutoSqli('http://127.0.0.1:8774', 'http://192.168.3.171/1.php?id=1')
    t.run()
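Before AutoSqli can talk to anything, a sqlmapapi server has to be running. With a local sqlmap checkout it is started roughly as follows; the host and port are examples and just have to match the server argument passed to AutoSqli (8774 above and 8775 later in this post are arbitrary choices):

python sqlmapapi.py -s -H 127.0.0.1 -p 8775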

Its workflow is as follows (a minimal raw-requests sketch follows the list):

GET request to create a new task and obtain a task id
POST request against that task id to set the scan options
POST request against that task id to start scanning the specified url
GET request against that task id to get the scan status
GET request against that task id to fetch the test results
GET request against that task id to delete the task
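For reference, here is a minimal sketch of that same sequence using requests directly against a locally running sqlmapapi server. The server address, target URL, option values, and polling interval are assumptions for illustration, and error handling is left out:

import json
import time

import requests

SERVER = 'http://127.0.0.1:8775'            # assumed sqlmapapi server address
TARGET = 'http://192.168.3.171/1.php?id=1'  # assumed target URL

# 1. create a task and grab its id
taskid = requests.get(SERVER + '/task/new').json()['taskid']

# 2. set the scan options for that task (POST with a JSON body)
requests.post(SERVER + '/option/' + taskid + '/set',
              data=json.dumps({'options': {'smart': True}}),
              headers={'Content-Type': 'application/json'})

# 3. start the scan against the target URL
requests.post(SERVER + '/scan/' + taskid + '/start',
              data=json.dumps({'url': TARGET}),
              headers={'Content-Type': 'application/json'})

# 4. poll the status until the engine reports 'terminated'
while requests.get(SERVER + '/scan/' + taskid + '/status').json()['status'] != 'terminated':
    time.sleep(5)

# 5. fetch the results; a non-empty 'data' list means an injection was found
data = requests.get(SERVER + '/scan/' + taskid + '/data').json()['data']
print('injection: ' + TARGET if data else 'not injection: ' + TARGET)

# 6. clean up the task
requests.get(SERVER + '/task/' + taskid + '/delete')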

Looking at the server class in lib/utils/api.py, you can see that all interaction with the service happens by submitting data to the server. The methods fall into three categories:

Users' methods
Admin functions
sqlmap core interact functions

The routes that accept data are as follows:

User methods
    @get("/task/new")
    @get("/task/<taskid>/delete")

Admin functions
    @get("/admin/<taskid>/list")
    @get("/admin/<taskid>/flush")

Core interaction functions
    @get("/option/<taskid>/list")
    @post("/option/<taskid>/get")
    @post("/option/<taskid>/set")
    @post("/scan/<taskid>/start")
    @get("/scan/<taskid>/stop")
    @get("/scan/<taskid>/kill")
    @get("/scan/<taskid>/status")
    @get("/scan/<taskid>/data")
    @get("/scan/<taskid>/log/<start>/<end>")
    @get("/scan/<taskid>/log")
    @get("/download/<taskid>/<target>/<filename>")

As for deciding whether there is an injection vulnerability, the code judges it like this: if the data field of the returned dictionary is non-empty, the target is injectable.

Then I took the spider module from https://github.com/smarttang/w3a_Scan_Console/blob/master/module/sprider_module.py and integrated it a little:

#!/usr/bin/python
# vim: set fileencoding=utf-8:

import sys
import urllib2
import re
from BeautifulSoup import BeautifulSoup

import autosql


class SpriderUrl:
    # Initialization
    def __init__(self, url):
        self.url = url
        # self.con = Db_Connector('sprider.ini')

    # Get the first batch of URLs from the target URL
    def get_self(self):
        urls = []
        try:
            body_text = urllib2.urlopen(self.url).read()
        except:
            print "[*] Web Get Error:checking the Url"
        soup = BeautifulSoup(body_text)
        links = soup.findAll('a')
        for link in links:
            # We have a candidate URL, but it still needs processing
            _url = link.get('href')
            # First make sure it is not None and does not start with a meaningless
            # prefix, then check the suffix -- skip anything not worth crawling
            if re.match('^(javascript|:;|#)', _url) or _url is None or re.match('.(jpg|png|bmp|mp3|wma|wmv|gz|zip|rar|iso|pdf|txt|db)$', _url):
                continue
            # If it starts with http|https, make sure it stays on the target site;
            # we do not crawl beyond this site
            if re.match('^(http|https)', _url):
                if not re.match('^' + self.url, _url):
                    continue
                else:
                    urls.append(_url)
            else:
                urls.append(self.url + _url)
        rst = list(set(urls))
        for rurl in rst:
            try:
                self.sprider_self_all(rurl)
                # Recurse; the obvious flaw is that some pages get crawled repeatedly.
                # Then hand each URL over to autosql:
                # AutoSqli('http://127.0.0.1:8775', rurl).run
            except:
                print "spider error"

    def sprider_self_all(self, domain):
        urls = []
        try:
            body_text = urllib2.urlopen(domain).read()
        except:
            print "[*] Web Get Error:checking the Url"
            sys.exit(0)
        soup = BeautifulSoup(body_text)
        links = soup.findAll('a')
        for link in links:
            # We have a candidate URL, but it still needs processing
            _url = link.get('href')
            # First make sure it is not None and does not start with a meaningless
            # prefix, then check the suffix -- skip anything not worth crawling
            try:
                if re.match('^(javascript|:;|#)', str(_url)) or str(_url) is None or re.match('.(jpg|png|bmp|mp3|wma|wmv|gz|zip|rar|iso|pdf|txt|db)$', str(_url)):
                    continue
            except TypeError:
                print "[*] Type is Error! :" + str(_url)
                continue
            # If it starts with http|https, make sure it stays on the target site;
            # we do not crawl beyond this site
            if re.match('^(http|https)', _url):
                if not re.match('^' + self.url, _url):
                    continue
                else:
                    urls.append(_url)
            else:
                urls.append(self.url + _url)
        res = list(set(urls))
        for rurl in res:
            try:
                print rurl
                # AutoSqli('http://127.0.0.1:8775', rurl).run
            except:
                print "spider error"


spi = "http://0day5.com/"
t = SpriderUrl(spi)
# First crawl
t.get_self()
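One caveat about the spider: from BeautifulSoup import BeautifulSoup is the old BeautifulSoup 3 package for Python 2. On a newer setup you would normally install beautifulsoup4 and adjust the import; the calls stay essentially the same. A tiny sketch, assuming pip install beautifulsoup4:

# bs4 equivalent of the BeautifulSoup 3 usage above
from bs4 import BeautifulSoup

body_text = '<a href="/1.php?id=1">link</a>'   # stand-in for urllib2.urlopen(...).read()
soup = BeautifulSoup(body_text, 'html.parser')
links = soup.find_all('a')                     # bs4 spelling of findAll('a')
print([link.get('href') for link in links])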

A better approach is to store the crawled URLs in a database and check for duplicates before handing them off:

for rurl in res:
    if self.con.find_item("select * from url_sprider where url='" + rurl + "' and domain='" + self.url + "'"):
        continue
    else:
        try:
            self.con.insert_item("insert into url_sprider(url,tag,domain)values('" + rurl + "',0,'" + self.url + "')")
        except:
            print "[*] insert into is Error!"
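Note that the snippet above builds its SQL by string concatenation, which is itself injectable; somewhat ironic for an injection scanner. The Db_Connector helper is not shown in the post, so purely as an illustration, here is the same de-duplication logic sketched with sqlite3 and parameterized queries (the database file, table name, and schema are assumptions):

import sqlite3

# assumed local store; the original uses an unspecified Db_Connector('sprider.ini')
con = sqlite3.connect('sprider.db')
con.execute('CREATE TABLE IF NOT EXISTS url_sprider (url TEXT, tag INTEGER, domain TEXT)')

def save_if_new(con, rurl, domain):
    # parameterized queries keep crawled URLs out of the SQL string itself
    cur = con.execute('SELECT 1 FROM url_sprider WHERE url = ? AND domain = ?', (rurl, domain))
    if cur.fetchone() is None:
        con.execute('INSERT INTO url_sprider (url, tag, domain) VALUES (?, 0, ?)', (rurl, domain))
        con.commit()

save_if_new(con, 'http://0day5.com/1.php?id=1', 'http://0day5.com/')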

[via @屌丝的归档条记] Note: the original source of this article could not be traced, so for now only the repost link is noted; the author link will be added once the original author is identified.
