Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit 9da8d55

Browse files
committed
Implements #2557
1 parent 864711b commit 9da8d55

6 files changed

Lines changed: 270 additions & 229 deletions

File tree

lib/core/settings.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
from lib.core.enums import OS
2020

2121
# sqlmap version (<major>.<minor>.<month>.<monthly commit>)
22-
VERSION = "1.1.6.4"
22+
VERSION = "1.1.6.5"
2323
TYPE = "dev" if VERSION.count('.') > 2 and VERSION.split('.')[-1] != '0' else "stable"
2424
TYPE_COLORS = {"dev": 33, "stable": 90, "pip": 34}
2525
VERSION_STRING = "sqlmap/%s#%s" % ('.'.join(VERSION.split('.')[:-1]) if VERSION.count('.') > 2 and VERSION.split('.')[-1] == '0' else VERSION, TYPE)

lib/techniques/error/use.py

Lines changed: 84 additions & 83 deletions
Original file line numberDiff line numberDiff line change
@@ -352,93 +352,94 @@ def errorUse(expression, dump=False):
352352
value = [] # for empty tables
353353
return value
354354

355-
if " ORDER BY " in expression and (stopLimit - startLimit) > SLOW_ORDER_COUNT_THRESHOLD:
356-
message = "due to huge table size do you want to remove "
357-
message += "ORDER BY clause gaining speed over consistency? [y/N] "
358-
359-
if readInput(message, default="N", boolean=True):
360-
expression = expression[:expression.index(" ORDER BY ")]
361-
362-
numThreads = min(conf.threads, (stopLimit - startLimit))
363-
364-
threadData = getCurrentThreadData()
365-
366-
try:
367-
threadData.shared.limits = iter(xrange(startLimit, stopLimit))
368-
except OverflowError:
369-
errMsg = "boundary limits (%d,%d) are too large. Please rerun " % (startLimit, stopLimit)
370-
errMsg += "with switch '--fresh-queries'"
371-
raise SqlmapDataException(errMsg)
372-
373-
threadData.shared.value = BigArray()
374-
threadData.shared.buffered = []
375-
threadData.shared.counter = 0
376-
threadData.shared.lastFlushed = startLimit - 1
377-
threadData.shared.showEta = conf.eta and (stopLimit - startLimit) > 1
378-
379-
if threadData.shared.showEta:
380-
threadData.shared.progress = ProgressBar(maxValue=(stopLimit - startLimit))
381-
382-
if kb.dumpTable and (len(expressionFieldsList) < (stopLimit - startLimit) > CHECK_ZERO_COLUMNS_THRESHOLD):
383-
for field in expressionFieldsList:
384-
if _oneShotErrorUse("SELECT COUNT(%s) FROM %s" % (field, kb.dumpTable)) == '0':
385-
emptyFields.append(field)
386-
debugMsg = "column '%s' of table '%s' will not be " % (field, kb.dumpTable)
387-
debugMsg += "dumped as it appears to be empty"
388-
logger.debug(debugMsg)
389-
390-
if stopLimit > TURN_OFF_RESUME_INFO_LIMIT:
391-
kb.suppressResumeInfo = True
392-
debugMsg = "suppressing possible resume console info because of "
393-
debugMsg += "large number of rows. It might take too long"
394-
logger.debug(debugMsg)
395-
396-
try:
397-
def errorThread():
398-
threadData = getCurrentThreadData()
399-
400-
while kb.threadContinue:
401-
with kb.locks.limit:
402-
try:
403-
valueStart = time.time()
404-
threadData.shared.counter += 1
405-
num = threadData.shared.limits.next()
406-
except StopIteration:
407-
break
408-
409-
output = _errorFields(expression, expressionFields, expressionFieldsList, num, emptyFields, threadData.shared.showEta)
355+
if isNumPosStrValue(count) and int(count) > 1:
356+
if " ORDER BY " in expression and (stopLimit - startLimit) > SLOW_ORDER_COUNT_THRESHOLD:
357+
message = "due to huge table size do you want to remove "
358+
message += "ORDER BY clause gaining speed over consistency? [y/N] "
359+
360+
if readInput(message, default="N", boolean=True):
361+
expression = expression[:expression.index(" ORDER BY ")]
362+
363+
numThreads = min(conf.threads, (stopLimit - startLimit))
364+
365+
threadData = getCurrentThreadData()
366+
367+
try:
368+
threadData.shared.limits = iter(xrange(startLimit, stopLimit))
369+
except OverflowError:
370+
errMsg = "boundary limits (%d,%d) are too large. Please rerun " % (startLimit, stopLimit)
371+
errMsg += "with switch '--fresh-queries'"
372+
raise SqlmapDataException(errMsg)
373+
374+
threadData.shared.value = BigArray()
375+
threadData.shared.buffered = []
376+
threadData.shared.counter = 0
377+
threadData.shared.lastFlushed = startLimit - 1
378+
threadData.shared.showEta = conf.eta and (stopLimit - startLimit) > 1
379+
380+
if threadData.shared.showEta:
381+
threadData.shared.progress = ProgressBar(maxValue=(stopLimit - startLimit))
382+
383+
if kb.dumpTable and (len(expressionFieldsList) < (stopLimit - startLimit) > CHECK_ZERO_COLUMNS_THRESHOLD):
384+
for field in expressionFieldsList:
385+
if _oneShotErrorUse("SELECT COUNT(%s) FROM %s" % (field, kb.dumpTable)) == '0':
386+
emptyFields.append(field)
387+
debugMsg = "column '%s' of table '%s' will not be " % (field, kb.dumpTable)
388+
debugMsg += "dumped as it appears to be empty"
389+
logger.debug(debugMsg)
390+
391+
if stopLimit > TURN_OFF_RESUME_INFO_LIMIT:
392+
kb.suppressResumeInfo = True
393+
debugMsg = "suppressing possible resume console info because of "
394+
debugMsg += "large number of rows. It might take too long"
395+
logger.debug(debugMsg)
396+
397+
try:
398+
def errorThread():
399+
threadData = getCurrentThreadData()
400+
401+
while kb.threadContinue:
402+
with kb.locks.limit:
403+
try:
404+
valueStart = time.time()
405+
threadData.shared.counter += 1
406+
num = threadData.shared.limits.next()
407+
except StopIteration:
408+
break
410409

411-
if not kb.threadContinue:
412-
break
410+
output = _errorFields(expression, expressionFields, expressionFieldsList, num, emptyFields, threadData.shared.showEta)
413411

414-
if output and isListLike(output) and len(output) == 1:
415-
output = output[0]
412+
if not kb.threadContinue:
413+
break
416414

417-
with kb.locks.value:
418-
index = None
419-
if threadData.shared.showEta:
420-
threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter)
421-
for index in xrange(1 + len(threadData.shared.buffered)):
422-
if index < len(threadData.shared.buffered) and threadData.shared.buffered[index][0] >= num:
423-
break
424-
threadData.shared.buffered.insert(index or 0, (num, output))
425-
while threadData.shared.buffered and threadData.shared.lastFlushed + 1 == threadData.shared.buffered[0][0]:
426-
threadData.shared.lastFlushed += 1
427-
threadData.shared.value.append(threadData.shared.buffered[0][1])
428-
del threadData.shared.buffered[0]
429-
430-
runThreads(numThreads, errorThread)
431-
432-
except KeyboardInterrupt:
433-
abortedFlag = True
434-
warnMsg = "user aborted during enumeration. sqlmap "
435-
warnMsg += "will display partial output"
436-
logger.warn(warnMsg)
415+
if output and isListLike(output) and len(output) == 1:
416+
output = output[0]
417+
418+
with kb.locks.value:
419+
index = None
420+
if threadData.shared.showEta:
421+
threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter)
422+
for index in xrange(1 + len(threadData.shared.buffered)):
423+
if index < len(threadData.shared.buffered) and threadData.shared.buffered[index][0] >= num:
424+
break
425+
threadData.shared.buffered.insert(index or 0, (num, output))
426+
while threadData.shared.buffered and threadData.shared.lastFlushed + 1 == threadData.shared.buffered[0][0]:
427+
threadData.shared.lastFlushed += 1
428+
threadData.shared.value.append(threadData.shared.buffered[0][1])
429+
del threadData.shared.buffered[0]
430+
431+
runThreads(numThreads, errorThread)
432+
433+
except KeyboardInterrupt:
434+
abortedFlag = True
435+
warnMsg = "user aborted during enumeration. sqlmap "
436+
warnMsg += "will display partial output"
437+
logger.warn(warnMsg)
437438

438-
finally:
439-
threadData.shared.value.extend(_[1] for _ in sorted(threadData.shared.buffered))
440-
value = threadData.shared.value
441-
kb.suppressResumeInfo = False
439+
finally:
440+
threadData.shared.value.extend(_[1] for _ in sorted(threadData.shared.buffered))
441+
value = threadData.shared.value
442+
kb.suppressResumeInfo = False
442443

443444
if not value and not abortedFlag:
444445
value = _errorFields(expression, expressionFields, expressionFieldsList)

0 commit comments

Comments
 (0)