Access Probing

Readme

What the script does:

Reads 新建文本文档.txt and extracts every link that responds on port 80 or 443, giving you a very fast web liveness check right after gathering subdomains. (Of course, ports 80 and 443 are not all you should care about; even after running this script you still need to test the subdomains' other ports. This is simply the quickest way to pull out the live hosts; a minimal port-check sketch follows the script below.)

Script environment

  • Python 3
  • Third-party libraries: requests, gevent
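
If the libraries are not installed yet, a typical setup looks like this (the exact pip invocation depends on your environment):

    pip3 install requests gevent

The input file 新建文本文档.txt ("New Text Document.txt", the Windows default name) holds one bare hostname per line, without a scheme, for example:

    www.example.com
    admin.example.com
    mail.example.com

Save the script below as, say, probe.py (the name is arbitrary) and run it with python3 probe.py.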

Script

from gevent import monkey; monkey.patch_all()  # patch before importing requests so its sockets are cooperative
import gevent
from gevent.pool import Pool
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)


pools = Pool(1000)

headers = {
    'Connection': 'close',
    'Pragma': 'no-cache',
    'Cache-Control': 'no-cache',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9',
}


urls = []             # bare hosts that responded
protocol_urls = []    # full URLs (with scheme) that responded
filename = "新建文本文档.txt"
gevent_list = []
EXCEPTION = ""        # log of hosts that raised (connection refused, timeout, ...)
EXCEPTION_URL = []    # hosts that failed over HTTP, retried over HTTPS
NOT_200 = ""          # log of responses outside the 200-400 range

def handle(url):
    """Probe port 80: GET http://<host> and record any response."""
    global EXCEPTION
    global NOT_200
    try:
        url = 'http://' + url
        res = requests.get(url=url, headers=headers, timeout=10)
        if 200 <= res.status_code <= 400 and len(res.text) > 200:
            print(res.status_code, ' ', url)
            urls.append(url.replace('http://', ''))
            protocol_urls.append(url)
        elif 200 <= res.status_code <= 400:
            # reachable but suspiciously short body; keep it anyway
            urls.append(url.replace('http://', ''))
            protocol_urls.append(url)
        else:
            NOT_200 += (str(res.status_code) + ' ' + url + '\n')
            urls.append(url.replace('http://', ''))
            protocol_urls.append(url)
    except Exception as e:
        EXCEPTION_URL.append(url.replace('http://', ''))
        EXCEPTION += ('[--]' + ' ' + url + ' ' + str(e) + '\n')

def handle2(url):
    """Probe port 443: GET https://<host>, skipping hosts already seen on port 80."""
    global EXCEPTION
    global NOT_200
    try:
        url = 'https://' + url
        res = requests.get(url=url, headers=headers, verify=False, timeout=10)
        if url.replace('https://', '') not in urls:
            if 200 <= res.status_code <= 400 and len(res.text) > 200:
                print(res.status_code, ' ', url)
                urls.append(url.replace('https://', ''))
                protocol_urls.append(url)
            elif 200 <= res.status_code <= 400:
                # reachable but suspiciously short body; keep it anyway
                urls.append(url.replace('https://', ''))
                protocol_urls.append(url)
            else:
                NOT_200 += (str(res.status_code) + ' ' + url + '\n')
                urls.append(url.replace('https://', ''))
                protocol_urls.append(url)
    except Exception as e:
        if url.replace('https://', '') not in EXCEPTION_URL:
            EXCEPTION += ('[--]' + ' ' + url + ' ' + str(e) + '\n')


# Read the target list and drop duplicates while preserving order.
f = open(filename, 'r')
lis = f.readlines()
deduped = []
for j in lis:
    j = j.strip()
    if j not in deduped:
        deduped.append(j)
lis = deduped
f.close()
print("[+] there are {} links.".format(len(lis)))

# Round 1: probe every host over HTTP (port 80).
for i in lis:
    i = i.strip()
    print(i)
    g = pools.spawn(handle, i)
    gevent_list.append(g)
for g in gevent_list:
    g.join()

# Round 2: probe the remaining hosts over HTTPS (port 443).
gevent_list = []
for i in lis:
    i = i.strip()
    g = pools.spawn(handle2, i)
    gevent_list.append(g)
for g in gevent_list:
    g.join()


#print(NOT_200)
#print(EXCEPTION)
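
The script only prints live hosts as it goes; the collected lists are never written to disk. If you want the results saved, a two-line addition at the end would do it (this is an assumed extension, not part of the original script):

    with open('alive.txt', 'w') as out:
        out.write('\n'.join(protocol_urls))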

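As the introduction notes, ports 80 and 443 are only the fastest first pass; the subdomains' other web ports still need checking. A minimal sketch of how the same gevent pool pattern could be reused for raw TCP connect checks on extra ports (the port list, pool size, and timeout here are assumptions, not part of the original script):

    from gevent import monkey; monkey.patch_all()  # make stdlib sockets cooperative
    import socket
    import gevent
    from gevent.pool import Pool

    PORTS = [8000, 8080, 8443, 8888]   # assumed candidate web ports; adjust to taste

    def check_port(host, port):
        # A plain TCP connect: if it succeeds, something is listening there.
        try:
            s = socket.create_connection((host, port), timeout=3)
            s.close()
            print('[open] {}:{}'.format(host, port))
        except OSError:
            pass

    pool = Pool(500)
    with open('新建文本文档.txt') as f:
        hosts = {line.strip() for line in f if line.strip()}
    jobs = [pool.spawn(check_port, h, p) for h in hosts for p in PORTS]
    gevent.joinall(jobs)

A connect check only tells you the port is open; whatever answers there still needs an HTTP request (as in the main script) to confirm it is a web service.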
Title: Access Probing

Author:

Published: 2019-08-08 21:00:52

Last updated: 2019-08-08 21:00:52

Original link: http://laker.xyz/2019/08/08/%E8%AE%BF%E9%97%AE%E5%88%BA%E6%8E%A2/

License: CC BY-NC-ND 4.0 International. Please keep the original link and author when reposting.