Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Appearance settings

Commit 0cee987

Browse files
committed
chapter 5:暴力破解目录和文件
1 parent f7082a6 commit 0cee987
Copy full SHA for 0cee987

File tree

2 files changed

+85
-87
lines changed
Filter options

2 files changed

+85
-87
lines changed

‎我手敲的代码(中文注释)/.idea/workspace.xml

Copy file name to clipboard. Expand all lines: 我手敲的代码(中文注释)/.idea/workspace.xml
+4-4Lines changed: 4 additions & 4 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.
+81-83Lines changed: 81 additions & 83 deletions
Original file line numberDiff line numberDiff line change
@@ -1,89 +1,87 @@
#-*- coding:utf8 -*-

import urllib2
import threading
import Queue
import urllib


threads = 50
target_url = "http://testphp.vulnweb.com"
wordlist_file = "./all.txt"
# Resume marker: when set, words are skipped until this exact word is seen, so
# a scan interrupted by a network failure can continue instead of restarting.
# (The author notes it is not actually wired up elsewhere in this script.)
resume = None
user_agent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36"


def built_wordlist(wordlist_file):
    """Load the wordlist file into a thread-safe Queue of candidate names.

    Honors the module-level ``resume`` marker: when it is not None, entries
    are dropped until the marker word is found, letting an interrupted scan
    pick up where it stopped.

    :param wordlist_file: path to a newline-separated dictionary file
    :return: Queue.Queue containing one stripped word per entry
    """
    # Read the whole dictionary file; 'with' guarantees the handle is closed
    # even if readlines() raises (the original left the file open on error).
    with open(wordlist_file, "rb") as fd:
        raw_words = fd.readlines()

    found_resume = False
    words = Queue.Queue()

    for word in raw_words:
        # Strip the trailing newline/whitespace from each entry.
        word = word.rstrip()

        # Resuming a previous run: skip words until the marker is seen.
        if resume is not None:
            if found_resume:
                words.put(word)
            else:
                if word == resume:
                    found_resume = True
                    print("Resuming wordlist from: %s" % resume)
        else:
            words.put(word)
    return words
def dir_bruter(word_queue, extentsions=None):
    """Worker: pull words off the shared queue and probe target_url for
    matching directories/files, printing every URL that answers with a
    non-404 status and a non-empty body.

    :param word_queue: Queue.Queue of candidate names, shared by all workers
    :param extentsions: optional list of file extensions (e.g. ".php") to
        append to each word when brute-forcing file names
    """
    while True:
        # get_nowait() instead of the original empty()-then-get(): with 50
        # workers on one queue, empty() can report False and another thread
        # can drain the queue before this thread's plain get(), leaving it
        # blocked forever. Queue.Empty cleanly signals "we are done".
        try:
            attempt = word_queue.get_nowait()
        except Queue.Empty:
            break

        # URLs to try for this word.
        attempt_list = []

        # No dot -> treat the word as a directory; otherwise as a file name.
        if "." not in attempt:
            attempt_list.append("/%s/" % attempt)
        else:
            attempt_list.append("/%s" % attempt)

        # Optionally also brute-force each configured file extension.
        if extentsions:
            for extentsion in extentsions:
                attempt_list.append("/%s%s" % (attempt, extentsion))

        # Try every candidate URL built for this word.
        for brute in attempt_list:
            # quote() percent-escapes characters unsafe in a URL path.
            url = "%s%s" % (target_url, urllib.quote(brute))
            try:
                headers = {'User-Agent': user_agent}
                r = urllib2.Request(url, headers=headers)

                response = urllib2.urlopen(r)

                # Only report hits that actually returned content.
                if len(response.read()):
                    print("[%d] => %s" % (response.code, url))
            except urllib2.URLError as e:
                # Report errors other than the expected 404 (e.g. 401/403);
                # pure network failures carry no 'code' and are ignored.
                if hasattr(e, 'code') and e.code != 404:
                    print("!!! %d => %s" % (e.code, url))
# ---- script entry point: build the work queue and fan out the scan ----
word_queue = built_wordlist(wordlist_file)
extentsions = [".php", ".bak", ".orig", ".inc"]

# Spawn the worker pool; every thread pulls words from the shared queue
# until it is exhausted.
for _ in range(threads):
    worker = threading.Thread(target=dir_bruter, args=(word_queue, extentsions))
    worker.start()

0 commit comments

Comments
0 (0)
Morty Proxy This is a proxified and sanitized view of the page, visit original site.