From e5f07cc4d3acbeef93149baf96b71283f4961229 Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Mon, 28 Jun 2021 13:21:02 +0200 Subject: [PATCH 01/22] Python: inline test of regex components - Added naive implementation of `charRange` so the test can run. - Made predicates public as needed. --- python/ql/src/semmle/python/regex.qll | 30 +++++++- .../regex/SubstructureTests.expected | 0 .../library-tests/regex/SubstructureTests.ql | 75 +++++++++++++++++++ .../test/library-tests/regex/charRangeTest.py | 32 ++++++++ .../test/library-tests/regex/charSetTest.py | 29 +++++++ .../regex/escapedCharacterTest.py | 23 ++++++ .../ql/test/library-tests/regex/groupTest.py | 4 + 7 files changed, 190 insertions(+), 3 deletions(-) create mode 100644 python/ql/test/library-tests/regex/SubstructureTests.expected create mode 100644 python/ql/test/library-tests/regex/SubstructureTests.ql create mode 100644 python/ql/test/library-tests/regex/charRangeTest.py create mode 100644 python/ql/test/library-tests/regex/charSetTest.py create mode 100644 python/ql/test/library-tests/regex/escapedCharacterTest.py create mode 100644 python/ql/test/library-tests/regex/groupTest.py diff --git a/python/ql/src/semmle/python/regex.qll b/python/ql/src/semmle/python/regex.qll index 365c36d0d2f6..d201b236bf6e 100644 --- a/python/ql/src/semmle/python/regex.qll +++ b/python/ql/src/semmle/python/regex.qll @@ -143,6 +143,26 @@ abstract class RegexString extends Expr { ) } + /** + * Holds if the character set starting at `charset_start` contains a character range + * with lower bound found between `start` and `lower_end` + * and upper bound found between `upper_start` and `end`. + */ + predicate charRange(int charset_start, int start, int lower_end, int upper_start, int end) { + // mirror logic from `simpleCharacter` + exists(int x, int y | + this.charSet(charset_start, y) and + this.char_set_start(charset_start, x) + | + x <= start and + this.simpleCharacter(start, lower_end) and + this.nonEscapedCharAt(lower_end) = "-" and + lower_end + 1 = upper_start and + this.simpleCharacter(upper_start, end) and + end < y + ) + } + predicate escapingChar(int pos) { this.escaping(pos) = true } private boolean escaping(int pos) { @@ -192,7 +212,12 @@ abstract class RegexString extends Expr { not exists(int i | start + 2 < i and i < end - 1 | this.getChar(i) = "}") } - private predicate escapedCharacter(int start, int end) { + /** + * Holds if an escaped character is found between `start` and `end`. + * Escaped characters include hex values, octal values and named escapes, + * but excludes backreferences. + */ + predicate escapedCharacter(int start, int end) { this.escapingChar(start) and not exists(this.getText().substring(start + 1, end + 1).toInt()) and ( @@ -221,10 +246,9 @@ abstract class RegexString extends Expr { exists(int x, int y | this.charSet(x, y) and index in [x + 1 .. y - 2]) } - /* + /** * 'simple' characters are any that don't alter the parsing of the regex. 
*/ - private predicate simpleCharacter(int start, int end) { end = start + 1 and not this.charSet(start, _) and diff --git a/python/ql/test/library-tests/regex/SubstructureTests.expected b/python/ql/test/library-tests/regex/SubstructureTests.expected new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/ql/test/library-tests/regex/SubstructureTests.ql b/python/ql/test/library-tests/regex/SubstructureTests.ql new file mode 100644 index 000000000000..f92f051ba783 --- /dev/null +++ b/python/ql/test/library-tests/regex/SubstructureTests.ql @@ -0,0 +1,75 @@ +import python +import TestUtilities.InlineExpectationsTest +private import semmle.python.regex + +class CharacterSetTest extends InlineExpectationsTest { + CharacterSetTest() { this = "CharacterSetTest" } + + override string getARelevantTag() { result = "charSet" } + + override predicate hasActualResult(Location location, string element, string tag, string value) { + exists(location.getFile().getRelativePath()) and + location.getFile().getBaseName() = "charSetTest.py" and + exists(Regex re, int start, int end | + re.charSet(start, end) and + location = re.getLocation() and + element = re.toString().substring(start, end) and + value = start + ":" + end and + tag = "charSet" + ) + } +} + +class CharacterRangeTest extends InlineExpectationsTest { + CharacterRangeTest() { this = "CharacterRangeTest" } + + override string getARelevantTag() { result = "charRange" } + + override predicate hasActualResult(Location location, string element, string tag, string value) { + exists(location.getFile().getRelativePath()) and + location.getFile().getBaseName() = "charRangeTest.py" and + exists(Regex re, int start, int lower_end, int upper_start, int end | + re.charRange(_, start, lower_end, upper_start, end) and + location = re.getLocation() and + element = re.toString().substring(start, end) and + value = start + ":" + lower_end + "-" + upper_start + ":" + end and + tag = "charRange" + ) + } +} + +class EscapeTest extends InlineExpectationsTest { + EscapeTest() { this = "EscapeTest" } + + override string getARelevantTag() { result = "escapedCharacter" } + + override predicate hasActualResult(Location location, string element, string tag, string value) { + exists(location.getFile().getRelativePath()) and + location.getFile().getBaseName() = "escapedCharacterTest.py" and + exists(Regex re, int start, int end | + re.escapedCharacter(start, end) and + location = re.getLocation() and + element = re.toString().substring(start, end) and + value = start + ":" + end and + tag = "escapedCharacter" + ) + } +} + +class GroupTest extends InlineExpectationsTest { + GroupTest() { this = "GroupTest" } + + override string getARelevantTag() { result = "group" } + + override predicate hasActualResult(Location location, string element, string tag, string value) { + exists(location.getFile().getRelativePath()) and + location.getFile().getBaseName() = "groupTest.py" and + exists(Regex re, int start, int end | + re.group(start, end) and + location = re.getLocation() and + element = re.toString().substring(start, end) and + value = start + ":" + end and + tag = "group" + ) + } +} diff --git a/python/ql/test/library-tests/regex/charRangeTest.py b/python/ql/test/library-tests/regex/charRangeTest.py new file mode 100644 index 000000000000..ac862e1962c8 --- /dev/null +++ b/python/ql/test/library-tests/regex/charRangeTest.py @@ -0,0 +1,32 @@ +import re + +re.compile(r'[]-[]') #$ MISSING: charRange=1:2-3:4 +re.compile(r'[---]') #$ MISSING: charRange=1:2-3:4 
+re.compile(r'[\---]') #$ MISSING: charRange=1:3-4:5 +re.compile(r'[--\-]') #$ MISSING: charRange=1:2-3:5 +re.compile(r'[\--\-]') #$ cMISSING: harRange=1:3-4:6 +re.compile(r'[0-9-A-Z]') #$ MISSING: charRange=1:2-3:4 charRange=5:6-7:8 +re.compile(r'[0\-9-A-Z]') #$ MISSING: charRange=4:5-6:7 +re.compile(r'[0--9-A-Z]') #$ MISSING: charRange=1:2-3:4 charRange=4:5-6:7 + +re.compile(r'[^A-Z]') #$ MISSING: charRange=2:3-4:5 + +re.compile(r'[\0-\09]') #$ MISSING: charRange=1:3-4:7 + +re.compile(r'[\0123-5]') #$ MISSING: charRange=5:6-7:8 + + +#Negative lookahead +re.compile(r'(?!not-this)^[A-Z_]+$') #$ MISSING: charRange=14:15-16:17 +#Negative lookbehind +re.compile(r'^[A-Z_]+$(?[\w]+)|') #$ MISSING: charSet=9:13 +re.compile(r'\|\[\][123]|\{\}') #$ MISSING: charSet=6:11 +re.compile(r'[^A-Z]') #$ MISSING: charSet=0:6 +re.compile("[]]") #$ charSet=0:3 +re.compile("[][]") #$ MISSING: charSet=0:4 +re.compile("[^][^]") #$ MISSING: charSet=0:6 +re.compile("[.][.]") #$ charSet=0:3 MISSING: charSet=3:6 +re.compile("[[]]") #$ charSet=0:3 +re.compile("[^]]") #$ MISSING: charSet=0:4 +re.compile("[^-]") #$ MISSING: charSet=0:4 +re.compile("[]-[]") #$ MISSING: charSet=0:5 +re.compile("[^]-[]") #$ MISSING: charSet=0:6 + +re.compile("]]][[[[]") #$ MISSING: charSet=3:8 + + +#ODASA-3985 +#Half Surrogate pairs +re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]') #$ MISSING: charSet=0:5 charSet=5:10 +#Outside BMP +re.compile(u'[\U00010000-\U0010ffff]') #$ MISSING: charSet=0:5 + +#Misparsed on LGTM +re.compile(r"\[(?P[^[]*)\]\((?P[^)]*)") #$ MISSING: charSet=10:14 charSet=28:32 + + # parses wrongly, sees this \|/ as a char set start +re.compile(r'''(?:[\s;,"'<>(){}|[\]@=+*]|:(?![/\\]))+''') #$ MISSING: charSet=3:25 charSet=30:35 diff --git a/python/ql/test/library-tests/regex/escapedCharacterTest.py b/python/ql/test/library-tests/regex/escapedCharacterTest.py new file mode 100644 index 000000000000..200b1a7a7af7 --- /dev/null +++ b/python/ql/test/library-tests/regex/escapedCharacterTest.py @@ -0,0 +1,23 @@ +import re + +re.compile(r'\b') #$ escapedCharacter=0:2 +re.compile(r'''\b''') #$ escapedCharacter=0:2 +re.compile(r"\b") #$ escapedCharacter=0:2 +re.compile(u"\b") # not escape +re.compile("\b") # not escape +re.compile(r'\\\b') #$ escapedCharacter=0:2 MISSING: escapedCharacter=2:4 +re.compile(r'[\---]') #$ escapedCharacter=1:3 +re.compile(r'[--\-]') #$ MISSING: escapedCharacter=3:5 +re.compile(r'[\--\-]') #$ escapedCharacter=1:3 MISSING: escapedCharacter=4:6 +re.compile(r'[0\-9-A-Z]') #$ MISSING: escapedCharacter=2:4 +re.compile(r'[\0-\09]') #$ escapedCharacter=1:3 MISSING: escapedCharacter=4:7 +re.compile(r'[\0123-5]') #$ MISSING: escapedCharacter=1:5 + +#ODASA-3985 +#Half Surrogate pairs +re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]') # not escapes +#Outside BMP +re.compile(u'[\U00010000-\U0010ffff]') # not escapes + +#Misparsed on LGTM +re.compile(r"\[(?P[^[]*)\]\((?P[^)]*)") #$ escapedCharacter=0:2 MISSING: escapedCharacter=16:18 escapedCharacter=18:20 diff --git a/python/ql/test/library-tests/regex/groupTest.py b/python/ql/test/library-tests/regex/groupTest.py new file mode 100644 index 000000000000..634cf646fdf9 --- /dev/null +++ b/python/ql/test/library-tests/regex/groupTest.py @@ -0,0 +1,4 @@ +import re + +re.compile(r'(?P\w+) (?P\w+)') #$ MISSING: group=0:14 group=15:30 +re.compile(r'([)(])') #$ MISSING: group=0:6 From 74ca1d00b98698eff9c837aa965a67bec706733a Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Mon, 28 Jun 2021 14:45:41 +0200 Subject: [PATCH 02/22] Python: More precise regex parsing 
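
The test files from the previous commit use inline expectations of the form
`#$ tag=value`, where the value records zero-based offsets into the pattern
text and a `MISSING:` prefix marks a result that is expected but not yet
produced by the parser. For example, on

    re.compile(r'[0-9-A-Z]')  #$ charRange=1:2-3:4 charRange=5:6-7:8

the first annotation says that `0` (offsets 1 to 2) and `9` (offsets 3 to 4)
are the lower and upper bounds of a character range. This commit refines the
parsing of character sets, character ranges, escapes and backreferences so
that most of the previously `MISSING:` results are now produced.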
--- python/ql/src/semmle/python/regex.qll | 185 +++++++++++++++--- .../test/library-tests/regex/charRangeTest.py | 32 +-- .../test/library-tests/regex/charSetTest.py | 32 +-- .../regex/escapedCharacterTest.py | 14 +- .../ql/test/library-tests/regex/groupTest.py | 4 +- 5 files changed, 201 insertions(+), 66 deletions(-) diff --git a/python/ql/src/semmle/python/regex.qll b/python/ql/src/semmle/python/regex.qll index d201b236bf6e..b8e1405194e5 100644 --- a/python/ql/src/semmle/python/regex.qll +++ b/python/ql/src/semmle/python/regex.qll @@ -121,8 +121,57 @@ deprecated string mode_from_mode_object(Value obj) { abstract class RegexString extends Expr { RegexString() { (this instanceof Bytes or this instanceof Unicode) } + override string toString() { + result = this.(Bytes).getText() + or + result = this.(Unicode).getText() + } + + /** result is true for those start chars that actually mark a start of a char set. */ + boolean char_set_start(int pos) { + exists(int index | + char_set_delimiter(index, pos) = true and + ( + index = 1 and result = true // if a '[' is first in the string (among brackets), it starts a char set + or + index > 1 and + not char_set_delimiter(index - 1, _) = false and + result = false + or + exists(int p1 | + char_set_delimiter(index - 1, p1) = false and // if it is preceded by a closing bracket, it starts a char set + if + exists(int p2 | + p1 = p2 + 1 + or + this.getChar(p2 + 1) = "^" and + p1 = p2 + 2 + | + char_set_delimiter(index - 2, p2) = true // but the closing bracket only closes... + ) + then + exists(int p2 | char_set_delimiter(index - 2, p2) = true | + result = char_set_start(p2).booleanNot() // ...if it is not the first in a char set + ) + else result = true + ) + ) + ) + } + + /** result denotes if the index is a left bracket */ + boolean char_set_delimiter(int index, int pos) { + pos = rank[index](int p | this.nonEscapedCharAt(p) = "[" or this.nonEscapedCharAt(p) = "]") and + ( + this.nonEscapedCharAt(pos) = "[" and result = true + or + this.nonEscapedCharAt(pos) = "]" and result = false + ) + } + + /** Hold is a character set starts between `start` and `end`. */ predicate char_set_start(int start, int end) { - this.nonEscapedCharAt(start) = "[" and + this.char_set_start(start) = true and ( this.getChar(start + 1) = "^" and end = start + 2 or @@ -143,23 +192,80 @@ abstract class RegexString extends Expr { ) } + /** An indexed version of `char_set_token/3` */ + private predicate char_set_token(int charset_start, int index, int token_start, int token_end) { + token_start = + rank[index](int start, int end | this.char_set_token(charset_start, start, end) | start) and + this.char_set_token(charset_start, token_start, token_end) + } + + /** Either a char or a - */ + private predicate char_set_token(int charset_start, int start, int end) { + this.char_set_start(charset_start, start) and + ( + this.escapedCharacter(start, end) + or + exists(this.nonEscapedCharAt(start)) and end = start + 1 + ) + or + this.char_set_token(charset_start, _, start) and + ( + this.escapedCharacter(start, end) + or + exists(this.nonEscapedCharAt(start)) and + end = start + 1 and + not this.getChar(start) = "]" + ) + } + + /** + * Holds if the character set starting at `charset_start` contains either + * a character or a range found between `start` and `end`. 
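+   *
+   * For example, in `[0-9a]` the children of the set starting at offset 0 are
+   * the range `0-9` (from offset 1 to 4) and the single character `a` (from offset 4 to 5).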
+ */ + predicate char_set_child(int charset_start, int start, int end) { + this.char_set_token(charset_start, start, end) and + not exists(int range_start, int range_end | + this.charRange(charset_start, range_start, _, _, range_end) and + range_start <= start and + range_end >= end + ) + or + this.charRange(charset_start, start, _, _, end) + } + /** * Holds if the character set starting at `charset_start` contains a character range * with lower bound found between `start` and `lower_end` * and upper bound found between `upper_start` and `end`. */ predicate charRange(int charset_start, int start, int lower_end, int upper_start, int end) { - // mirror logic from `simpleCharacter` - exists(int x, int y | - this.charSet(charset_start, y) and - this.char_set_start(charset_start, x) - | - x <= start and - this.simpleCharacter(start, lower_end) and - this.nonEscapedCharAt(lower_end) = "-" and - lower_end + 1 = upper_start and - this.simpleCharacter(upper_start, end) and - end < y + exists(int index | + this.charRangeEnd(charset_start, index) = true and + this.char_set_token(charset_start, index - 2, start, lower_end) and + this.char_set_token(charset_start, index, upper_start, end) + ) + } + + private boolean charRangeEnd(int charset_start, int index) { + this.char_set_token(charset_start, index, _, _) and + ( + index in [1, 2] and result = false + or + index > 2 and + exists(int connector_start | + this.char_set_token(charset_start, index - 1, connector_start, _) and + this.nonEscapedCharAt(connector_start) = "-" and + result = + this.charRangeEnd(charset_start, index - 2) + .booleanNot() + .booleanAnd(this.charRangeEnd(charset_start, index - 1).booleanNot()) + ) + or + not exists(int connector_start | + this.char_set_token(charset_start, index - 1, connector_start, _) and + this.nonEscapedCharAt(connector_start) = "-" + ) and + result = false ) } @@ -184,14 +290,14 @@ abstract class RegexString extends Expr { string nonEscapedCharAt(int i) { result = this.getText().charAt(i) and - not this.escapingChar(i - 1) + not exists(int x, int y | this.escapedCharacter(x, y) and i in [x .. y - 1]) } private predicate isOptionDivider(int i) { this.nonEscapedCharAt(i) = "|" } - private predicate isGroupEnd(int i) { this.nonEscapedCharAt(i) = ")" } + private predicate isGroupEnd(int i) { this.nonEscapedCharAt(i) = ")" and not this.inCharSet(i) } - private predicate isGroupStart(int i) { this.nonEscapedCharAt(i) = "(" } + private predicate isGroupStart(int i) { this.nonEscapedCharAt(i) = "(" and not this.inCharSet(i) } predicate failedToParse(int i) { exists(this.getChar(i)) and @@ -219,14 +325,18 @@ abstract class RegexString extends Expr { */ predicate escapedCharacter(int start, int end) { this.escapingChar(start) and - not exists(this.getText().substring(start + 1, end + 1).toInt()) and + not this.numbered_backreference(start, _, _) and ( // hex value \xhh this.getChar(start + 1) = "x" and end = start + 4 or // octal value \ooo end in [start + 2 .. 
start + 4] and - exists(this.getText().substring(start + 1, end).toInt()) + this.getText().substring(start + 1, end).toInt() >= 0 and + not ( + end < start + 4 and + exists(this.getText().substring(start + 1, end + 1).toInt()) + ) or // 16-bit hex value \uhhhh this.getChar(start + 1) = "u" and end = start + 6 @@ -238,11 +348,13 @@ abstract class RegexString extends Expr { or // escape not handled above, update when adding a new case not this.getChar(start + 1) in ["x", "u", "U", "N"] and + not exists(this.getChar(start + 1).toInt()) and end = start + 2 ) } - private predicate inCharSet(int index) { + /** Holds if `index` is inside a character set. */ + predicate inCharSet(int index) { exists(int x, int y | this.charSet(x, y) and index in [x + 1 .. y - 2]) } @@ -262,7 +374,7 @@ abstract class RegexString extends Expr { or start = z - 2 or - start > y and start < z - 2 and not c = "-" + start > y and start < z - 2 and not this.charRange(_, _, start, end, _) ) or not this.inCharSet(start) and @@ -281,7 +393,8 @@ abstract class RegexString extends Expr { or this.escapedCharacter(start, end) ) and - not exists(int x, int y | this.group_start(x, y) and x <= start and y >= end) + not exists(int x, int y | this.group_start(x, y) and x <= start and y >= end) and + not exists(int x, int y | this.backreference(x, y) and x <= start and y >= end) } predicate normalCharacter(int start, int end) { @@ -326,12 +439,13 @@ abstract class RegexString extends Expr { or this.negativeAssertionGroup(start, end) or - positiveLookaheadAssertionGroup(start, end) + this.positiveLookaheadAssertionGroup(start, end) or this.positiveLookbehindAssertionGroup(start, end) } - private predicate emptyGroup(int start, int end) { + /** Holds if an empty group is found between `start` and `end`. */ + predicate emptyGroup(int start, int end) { exists(int endm1 | end = endm1 + 1 | this.group_start(start, endm1) and this.isGroupEnd(endm1) @@ -364,13 +478,29 @@ abstract class RegexString extends Expr { ) } - private predicate positiveLookaheadAssertionGroup(int start, int end) { + /** Holds if a negative lookahead is found between `start` and `end` */ + predicate negativeLookaheadAssertionGroup(int start, int end) { + exists(int in_start | this.negative_lookahead_assertion_start(start, in_start) | + this.groupContents(start, end, in_start, _) + ) + } + + /** Holds if a negative lookbehind is found between `start` and `end` */ + predicate negativeLookbehindAssertionGroup(int start, int end) { + exists(int in_start | this.negative_lookbehind_assertion_start(start, in_start) | + this.groupContents(start, end, in_start, _) + ) + } + + /** Holds if a positive lookahead is found between `start` and `end` */ + predicate positiveLookaheadAssertionGroup(int start, int end) { exists(int in_start | this.lookahead_assertion_start(start, in_start) | this.groupContents(start, end, in_start, _) ) } - private predicate positiveLookbehindAssertionGroup(int start, int end) { + /** Holds if a positive lookbehind is found between `start` and `end` */ + predicate positiveLookbehindAssertionGroup(int start, int end) { exists(int in_start | this.lookbehind_assertion_start(start, in_start) | this.groupContents(start, end, in_start, _) ) @@ -429,6 +559,8 @@ abstract class RegexString extends Expr { this.getChar(start + 1) = "?" and this.getChar(start + 2) = "P" and this.getChar(start + 3) = "=" and + // Should this be looking for unescaped ")"? 
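+    // (In Python's syntax a named reference `(?P=name)` is terminated by `)`.)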
+ // TODO: test this end = min(int i | i > start + 4 and this.getChar(i) = "?") } @@ -519,6 +651,7 @@ abstract class RegexString extends Expr { private predicate numbered_backreference(int start, int end, int value) { this.escapingChar(start) and + not this.getChar(start + 1) = "0" and exists(string text, string svalue, int len | end = start + len and text = this.getText() and @@ -527,7 +660,7 @@ abstract class RegexString extends Expr { svalue = text.substring(start + 1, start + len) and value = svalue.toInt() and not exists(text.substring(start + 1, start + len + 1).toInt()) and - value != 0 + value > 0 ) } @@ -551,6 +684,8 @@ abstract class RegexString extends Expr { this.group(start, end) or this.charSet(start, end) + or + this.backreference(start, end) } private predicate qualifier(int start, int end, boolean maybe_empty) { diff --git a/python/ql/test/library-tests/regex/charRangeTest.py b/python/ql/test/library-tests/regex/charRangeTest.py index ac862e1962c8..692c5d9a83e6 100644 --- a/python/ql/test/library-tests/regex/charRangeTest.py +++ b/python/ql/test/library-tests/regex/charRangeTest.py @@ -1,32 +1,32 @@ import re -re.compile(r'[]-[]') #$ MISSING: charRange=1:2-3:4 -re.compile(r'[---]') #$ MISSING: charRange=1:2-3:4 -re.compile(r'[\---]') #$ MISSING: charRange=1:3-4:5 -re.compile(r'[--\-]') #$ MISSING: charRange=1:2-3:5 -re.compile(r'[\--\-]') #$ cMISSING: harRange=1:3-4:6 -re.compile(r'[0-9-A-Z]') #$ MISSING: charRange=1:2-3:4 charRange=5:6-7:8 -re.compile(r'[0\-9-A-Z]') #$ MISSING: charRange=4:5-6:7 -re.compile(r'[0--9-A-Z]') #$ MISSING: charRange=1:2-3:4 charRange=4:5-6:7 +re.compile(r'[]-[]') #$ charRange=1:2-3:4 +re.compile(r'[---]') #$ charRange=1:2-3:4 +re.compile(r'[\---]') #$ charRange=1:3-4:5 +re.compile(r'[--\-]') #$ charRange=1:2-3:5 +re.compile(r'[\--\-]') #$ charRange=1:3-4:6 +re.compile(r'[0-9-A-Z]') #$ charRange=1:2-3:4 charRange=5:6-7:8 +re.compile(r'[0\-9-A-Z]') #$ charRange=4:5-6:7 +re.compile(r'[0--9-A-Z]') #$ charRange=1:2-3:4 charRange=4:5-6:7 -re.compile(r'[^A-Z]') #$ MISSING: charRange=2:3-4:5 +re.compile(r'[^A-Z]') #$ charRange=2:3-4:5 -re.compile(r'[\0-\09]') #$ MISSING: charRange=1:3-4:7 +re.compile(r'[\0-\09]') #$ charRange=1:3-4:7 -re.compile(r'[\0123-5]') #$ MISSING: charRange=5:6-7:8 +re.compile(r'[\0123-5]') #$ charRange=5:6-7:8 #Negative lookahead -re.compile(r'(?!not-this)^[A-Z_]+$') #$ MISSING: charRange=14:15-16:17 +re.compile(r'(?!not-this)^[A-Z_]+$') #$ charRange=14:15-16:17 #Negative lookbehind -re.compile(r'^[A-Z_]+$(?[\w]+)|') #$ MISSING: charSet=9:13 -re.compile(r'\|\[\][123]|\{\}') #$ MISSING: charSet=6:11 -re.compile(r'[^A-Z]') #$ MISSING: charSet=0:6 +re.compile(r'\A[+-]?\d+') #$ charSet=2:6 +re.compile(r'(?P[\w]+)|') #$ charSet=9:13 +re.compile(r'\|\[\][123]|\{\}') #$ charSet=6:11 +re.compile(r'[^A-Z]') #$ charSet=0:6 re.compile("[]]") #$ charSet=0:3 -re.compile("[][]") #$ MISSING: charSet=0:4 -re.compile("[^][^]") #$ MISSING: charSet=0:6 -re.compile("[.][.]") #$ charSet=0:3 MISSING: charSet=3:6 +re.compile("[][]") #$ charSet=0:4 +re.compile("[^][^]") #$ charSet=0:6 +re.compile("[.][.]") #$ charSet=0:3 charSet=3:6 re.compile("[[]]") #$ charSet=0:3 -re.compile("[^]]") #$ MISSING: charSet=0:4 -re.compile("[^-]") #$ MISSING: charSet=0:4 -re.compile("[]-[]") #$ MISSING: charSet=0:5 -re.compile("[^]-[]") #$ MISSING: charSet=0:6 +re.compile("[^]]") #$ charSet=0:4 +re.compile("[^-]") #$ charSet=0:4 +re.compile("[]-[]") #$ charSet=0:5 +re.compile("[^]-[]") #$ charSet=0:6 -re.compile("]]][[[[]") #$ MISSING: charSet=3:8 
+re.compile("]]][[[[]") #$ charSet=3:8 #ODASA-3985 #Half Surrogate pairs -re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]') #$ MISSING: charSet=0:5 charSet=5:10 +re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]') #$ charSet=0:5 charSet=5:10 #Outside BMP -re.compile(u'[\U00010000-\U0010ffff]') #$ MISSING: charSet=0:5 +re.compile(u'[\U00010000-\U0010ffff]') #$ charSet=0:5 #Misparsed on LGTM -re.compile(r"\[(?P[^[]*)\]\((?P[^)]*)") #$ MISSING: charSet=10:14 charSet=28:32 +re.compile(r"\[(?P[^[]*)\]\((?P[^)]*)") #$ charSet=10:14 charSet=28:32 # parses wrongly, sees this \|/ as a char set start -re.compile(r'''(?:[\s;,"'<>(){}|[\]@=+*]|:(?![/\\]))+''') #$ MISSING: charSet=3:25 charSet=30:35 +re.compile(r'''(?:[\s;,"'<>(){}|[\]@=+*]|:(?![/\\]))+''') #$ charSet=3:25 charSet=30:35 diff --git a/python/ql/test/library-tests/regex/escapedCharacterTest.py b/python/ql/test/library-tests/regex/escapedCharacterTest.py index 200b1a7a7af7..e6add851a8c7 100644 --- a/python/ql/test/library-tests/regex/escapedCharacterTest.py +++ b/python/ql/test/library-tests/regex/escapedCharacterTest.py @@ -5,13 +5,13 @@ re.compile(r"\b") #$ escapedCharacter=0:2 re.compile(u"\b") # not escape re.compile("\b") # not escape -re.compile(r'\\\b') #$ escapedCharacter=0:2 MISSING: escapedCharacter=2:4 +re.compile(r'\\\b') #$ escapedCharacter=0:2 escapedCharacter=2:4 re.compile(r'[\---]') #$ escapedCharacter=1:3 -re.compile(r'[--\-]') #$ MISSING: escapedCharacter=3:5 -re.compile(r'[\--\-]') #$ escapedCharacter=1:3 MISSING: escapedCharacter=4:6 -re.compile(r'[0\-9-A-Z]') #$ MISSING: escapedCharacter=2:4 -re.compile(r'[\0-\09]') #$ escapedCharacter=1:3 MISSING: escapedCharacter=4:7 -re.compile(r'[\0123-5]') #$ MISSING: escapedCharacter=1:5 +re.compile(r'[--\-]') #$ escapedCharacter=3:5 +re.compile(r'[\--\-]') #$ escapedCharacter=1:3 escapedCharacter=4:6 +re.compile(r'[0\-9-A-Z]') #$ escapedCharacter=2:4 +re.compile(r'[\0-\09]') #$ escapedCharacter=1:3 escapedCharacter=4:7 +re.compile(r'[\0123-5]') #$ escapedCharacter=1:5 #ODASA-3985 #Half Surrogate pairs @@ -20,4 +20,4 @@ re.compile(u'[\U00010000-\U0010ffff]') # not escapes #Misparsed on LGTM -re.compile(r"\[(?P[^[]*)\]\((?P[^)]*)") #$ escapedCharacter=0:2 MISSING: escapedCharacter=16:18 escapedCharacter=18:20 +re.compile(r"\[(?P[^[]*)\]\((?P[^)]*)") #$ escapedCharacter=0:2 escapedCharacter=16:18 escapedCharacter=18:20 diff --git a/python/ql/test/library-tests/regex/groupTest.py b/python/ql/test/library-tests/regex/groupTest.py index 634cf646fdf9..2cb96801f776 100644 --- a/python/ql/test/library-tests/regex/groupTest.py +++ b/python/ql/test/library-tests/regex/groupTest.py @@ -1,4 +1,4 @@ import re -re.compile(r'(?P\w+) (?P\w+)') #$ MISSING: group=0:14 group=15:30 -re.compile(r'([)(])') #$ MISSING: group=0:6 +re.compile(r'(?P\w+) (?P\w+)') #$ group=0:14 group=15:30 +re.compile(r'([)(])') #$ group=0:6 From 21007d21f4b53080db0d2dfa44be98bfcd86cb46 Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Mon, 28 Jun 2021 14:54:30 +0200 Subject: [PATCH 03/22] Python: track if qualifiers allow unbounded repeats. 
This in preparation for ReDoS --- python/ql/src/semmle/python/regex.qll | 103 +++++++++++------- .../library-tests/regex/Qualified.expected | 30 ++--- .../ql/test/library-tests/regex/Qualified.ql | 6 +- python/ql/test/library-tests/regex/Regex.ql | 2 +- 4 files changed, 81 insertions(+), 60 deletions(-) diff --git a/python/ql/src/semmle/python/regex.qll b/python/ql/src/semmle/python/regex.qll index b8e1405194e5..9aae8ded6a4d 100644 --- a/python/ql/src/semmle/python/regex.qll +++ b/python/ql/src/semmle/python/regex.qll @@ -382,7 +382,7 @@ abstract class RegexString extends Expr { not c = "[" and not c = ")" and not c = "|" and - not this.qualifier(start, _, _) + not this.qualifier(start, _, _, _) ) } @@ -688,41 +688,51 @@ abstract class RegexString extends Expr { this.backreference(start, end) } - private predicate qualifier(int start, int end, boolean maybe_empty) { - this.short_qualifier(start, end, maybe_empty) and not this.getChar(end) = "?" + private predicate qualifier(int start, int end, boolean maybe_empty, boolean may_repeat_forever) { + this.short_qualifier(start, end, maybe_empty, may_repeat_forever) and + not this.getChar(end) = "?" or - exists(int short_end | this.short_qualifier(start, short_end, maybe_empty) | + exists(int short_end | this.short_qualifier(start, short_end, maybe_empty, may_repeat_forever) | if this.getChar(short_end) = "?" then end = short_end + 1 else end = short_end ) } - private predicate short_qualifier(int start, int end, boolean maybe_empty) { + private predicate short_qualifier( + int start, int end, boolean maybe_empty, boolean may_repeat_forever + ) { ( - this.getChar(start) = "+" and maybe_empty = false + this.getChar(start) = "+" and maybe_empty = false and may_repeat_forever = true or - this.getChar(start) = "*" and maybe_empty = true + this.getChar(start) = "*" and maybe_empty = true and may_repeat_forever = true or - this.getChar(start) = "?" and maybe_empty = true + this.getChar(start) = "?" and maybe_empty = true and may_repeat_forever = false ) and end = start + 1 or - exists(int endin | end = endin + 1 | - this.getChar(start) = "{" and - this.getChar(endin) = "}" and - end > start and - exists(string multiples | multiples = this.getText().substring(start + 1, endin) | - multiples.regexpMatch("0+") and maybe_empty = true - or - multiples.regexpMatch("0*,[0-9]*") and maybe_empty = true - or - multiples.regexpMatch("0*[1-9][0-9]*") and maybe_empty = false - or - multiples.regexpMatch("0*[1-9][0-9]*,[0-9]*") and maybe_empty = false - ) and - not exists(int mid | - this.getChar(mid) = "}" and - mid > start and - mid < endin + exists(string lower, string upper | + this.multiples(start, end, lower, upper) and + (if lower = "" or lower.toInt() = 0 then maybe_empty = true else maybe_empty = false) and + if upper = "" then may_repeat_forever = true else may_repeat_forever = false + ) + } + + /** + * Holds if a repetition quantifier is found between `start` and `end`, + * with the given lower and upper bounds. If a bound is omitted, the corresponding + * string is empty. 
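+   *
+   * For example, for the pattern `a{2,4}` this holds with `start = 1`, `end = 6`,
+   * `lower = "2"` and `upper = "4"`; for `a{2,}` the upper bound is the empty string.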
+ */ + predicate multiples(int start, int end, string lower, string upper) { + this.getChar(start) = "{" and + this.getChar(end - 1) = "}" and + exists(string inner | inner = this.getText().substring(start + 1, end - 1) | + inner.regexpMatch("[0-9]+") and + lower = inner and + upper = lower + or + inner.regexpMatch("[0-9]*,[0-9]*") and + exists(int commaIndex | commaIndex = inner.indexOf(",") | + lower = inner.prefix(commaIndex) and + upper = inner.suffix(commaIndex + 1) ) ) } @@ -731,19 +741,29 @@ abstract class RegexString extends Expr { * Whether the text in the range start,end is a qualified item, where item is a character, * a character set or a group. */ - predicate qualifiedItem(int start, int end, boolean maybe_empty) { - this.qualifiedPart(start, _, end, maybe_empty) + predicate qualifiedItem(int start, int end, boolean maybe_empty, boolean may_repeat_forever) { + this.qualifiedPart(start, _, end, maybe_empty, may_repeat_forever) } - private predicate qualifiedPart(int start, int part_end, int end, boolean maybe_empty) { + /** + * Holds if a qualified part is found between `start` and `part_end` and the qualifier is + * found between `part_end` and `end`. + * + * `maybe_empty` is true if the part is optional. + * `may_repeat_forever` is true if the part may be repeated unboundedly. + */ + predicate qualifiedPart( + int start, int part_end, int end, boolean maybe_empty, boolean may_repeat_forever + ) { this.baseItem(start, part_end) and - this.qualifier(part_end, end, maybe_empty) + this.qualifier(part_end, end, maybe_empty, may_repeat_forever) } - private predicate item(int start, int end) { - this.qualifiedItem(start, end, _) + /** Holds if the range `start`, `end` contains a character, a quantifier, a character set or a group. */ + predicate item(int start, int end) { + this.qualifiedItem(start, end, _, _) or - this.baseItem(start, end) and not this.qualifier(end, _, _) + this.baseItem(start, end) and not this.qualifier(end, _, _, _) } private predicate subsequence(int start, int end) { @@ -766,7 +786,7 @@ abstract class RegexString extends Expr { */ predicate sequence(int start, int end) { this.sequenceOrQualified(start, end) and - not this.qualifiedItem(start, end, _) + not this.qualifiedItem(start, end, _, _) } private predicate sequenceOrQualified(int start, int end) { @@ -777,7 +797,8 @@ abstract class RegexString extends Expr { private predicate item_start(int start) { this.character(start, _) or this.isGroupStart(start) or - this.charSet(start, _) + this.charSet(start, _) or + this.backreference(start, _) } private predicate item_end(int end) { @@ -787,7 +808,7 @@ abstract class RegexString extends Expr { or this.charSet(_, end) or - this.qualifier(_, end, _) + this.qualifier(_, end, _, _) } private predicate top_level(int start, int end) { @@ -839,14 +860,14 @@ abstract class RegexString extends Expr { or exists(int x | this.firstPart(x, end) | this.emptyMatchAtStartGroup(x, start) or - this.qualifiedItem(x, start, true) or + this.qualifiedItem(x, start, true, _) or this.specialCharacter(x, start, "^") ) or exists(int y | this.firstPart(start, y) | this.item(start, end) or - this.qualifiedPart(start, end, y, _) + this.qualifiedPart(start, end, y, _, _) ) or exists(int x, int y | this.firstPart(x, y) | @@ -863,7 +884,7 @@ abstract class RegexString extends Expr { exists(int y | this.lastPart(start, y) | this.emptyMatchAtEndGroup(end, y) or - this.qualifiedItem(end, y, true) + this.qualifiedItem(end, y, true, _) or this.specialCharacter(end, y, "$") or @@ -875,7 +896,7 @@ 
abstract class RegexString extends Expr { this.item(start, end) ) or - exists(int y | this.lastPart(start, y) | this.qualifiedPart(start, end, y, _)) + exists(int y | this.lastPart(start, y) | this.qualifiedPart(start, end, y, _, _)) or exists(int x, int y | this.lastPart(x, y) | this.groupContents(x, y, start, end) @@ -892,7 +913,7 @@ abstract class RegexString extends Expr { ( this.character(start, end) or - this.qualifiedItem(start, end, _) + this.qualifiedItem(start, end, _, _) or this.charSet(start, end) ) and @@ -907,7 +928,7 @@ abstract class RegexString extends Expr { ( this.character(start, end) or - this.qualifiedItem(start, end, _) + this.qualifiedItem(start, end, _, _) or this.charSet(start, end) ) and diff --git a/python/ql/test/library-tests/regex/Qualified.expected b/python/ql/test/library-tests/regex/Qualified.expected index 30019f943eb9..b698a1d09557 100644 --- a/python/ql/test/library-tests/regex/Qualified.expected +++ b/python/ql/test/library-tests/regex/Qualified.expected @@ -1,15 +1,15 @@ -| (?!not-this)^[A-Z_]+$ | 13 | 20 | false | -| (?:(?:\n\r?)\|^)( *)\\S | 7 | 9 | true | -| (?:(?:\n\r?)\|^)( *)\\S | 14 | 16 | true | -| (?:[^%]\|^)?%\\((\\w*)\\)[a-z] | 0 | 11 | true | -| (?:[^%]\|^)?%\\((\\w*)\\)[a-z] | 15 | 18 | true | -| (?P[\\w]+)\| | 9 | 14 | false | -| \\A[+-]?\\d+ | 2 | 7 | true | -| \\A[+-]?\\d+ | 7 | 10 | false | -| \\[(?P[^[]*)\\]\\((?P[^)]*) | 10 | 15 | true | -| \\[(?P[^[]*)\\]\\((?P[^)]*) | 28 | 33 | true | -| ^[A-Z_]+$(?[\\w]+)\| | 9 | 14 | false | true | +| \\A[+-]?\\d+ | 2 | 7 | true | false | +| \\A[+-]?\\d+ | 7 | 10 | false | true | +| \\[(?P[^[]*)\\]\\((?P[^)]*) | 10 | 15 | true | true | +| \\[(?P[^[]*)\\]\\((?P[^)]*) | 28 | 33 | true | true | +| ^[A-Z_]+$(? Date: Mon, 28 Jun 2021 15:26:39 +0200 Subject: [PATCH 04/22] Python: A parse-tree-view of regular expressions This contains several contributions from @erik-krogh and also some fixes from @nickrolfe --- python/ql/src/semmle/python/RegexTreeView.qll | 958 ++++++++++++++++++ 1 file changed, 958 insertions(+) create mode 100644 python/ql/src/semmle/python/RegexTreeView.qll diff --git a/python/ql/src/semmle/python/RegexTreeView.qll b/python/ql/src/semmle/python/RegexTreeView.qll new file mode 100644 index 000000000000..30c6dc70f0d8 --- /dev/null +++ b/python/ql/src/semmle/python/RegexTreeView.qll @@ -0,0 +1,958 @@ +/** Provides a class hierarchy corresponding to a parse tree of regular expressions. */ + +import python +private import semmle.python.regex + +/** + * An element containing a regular expression term, that is, either + * a string literal (parsed as a regular expression) + * or another regular expression term. 
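+ *
+ * For example, parsing `(a|b)*` produces a literal node for the string itself,
+ * a quantifier, a group, an alternation, and two normal characters.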
+ */ +newtype TRegExpParent = + /** A string literal used as a regular expression */ + TRegExpLiteral(Regex re) or + /** A quantified term */ + TRegExpQuantifier(Regex re, int start, int end) { re.qualifiedItem(start, end, _, _) } or + /** A sequence term */ + TRegExpSequence(Regex re, int start, int end) { re.sequence(start, end) } or + /** An alternatio term */ + TRegExpAlt(Regex re, int start, int end) { re.alternation(start, end) } or + /** A character class term */ + TRegExpCharacterClass(Regex re, int start, int end) { re.charSet(start, end) } or + /** A character range term */ + TRegExpCharacterRange(Regex re, int start, int end) { re.charRange(_, start, _, _, end) } or + /** A group term */ + TRegExpGroup(Regex re, int start, int end) { re.group(start, end) } or + /** A special character */ + TRegExpSpecialChar(Regex re, int start, int end) { re.specialCharacter(start, end, _) } or + /** A normal character */ + TRegExpNormalChar(Regex re, int start, int end) { re.normalCharacter(start, end) } or + /** A back reference */ + TRegExpBackRef(Regex re, int start, int end) { re.backreference(start, end) } + +/** + * An element containing a regular expression term, that is, either + * a string literal (parsed as a regular expression) + * or another regular expression term. + */ +class RegExpParent extends TRegExpParent { + string toString() { result = "RegExpParent" } + + /** Gets the `i`th child term. */ + abstract RegExpTerm getChild(int i); + + /** Gets a child term . */ + RegExpTerm getAChild() { result = getChild(_) } + + /** Gets the number of child terms. */ + int getNumChild() { result = count(getAChild()) } + + /** Gets the associated regex. */ + abstract Regex getRegex(); +} + +/** A string literal used as a regular expression */ +class RegExpLiteral extends TRegExpLiteral, RegExpParent { + Regex re; + + RegExpLiteral() { this = TRegExpLiteral(re) } + + override RegExpTerm getChild(int i) { i = 0 and result.getRegex() = re and result.isRootTerm() } + + predicate isDotAll() { re.getAMode() = "DOTALL" } + + override Regex getRegex() { result = re } + + string getPrimaryQLClass() { result = "RegExpLiteral" } +} + +/** + * A regular expression term, that is, a syntactic part of a regular expression. + */ +class RegExpTerm extends RegExpParent { + Regex re; + int start; + int end; + + RegExpTerm() { + this = TRegExpAlt(re, start, end) + or + this = TRegExpBackRef(re, start, end) + or + this = TRegExpCharacterClass(re, start, end) + or + this = TRegExpCharacterRange(re, start, end) + or + this = TRegExpNormalChar(re, start, end) + or + this = TRegExpGroup(re, start, end) + or + this = TRegExpQuantifier(re, start, end) + or + this = TRegExpSequence(re, start, end) and + exists(seqChild(re, start, end, 1)) // if a sequence does not have more than one element, it should be treated as that element instead. + or + this = TRegExpSpecialChar(re, start, end) + } + + /** + * Gets the outermost term of this regular expression. + */ + RegExpTerm getRootTerm() { + this.isRootTerm() and result = this + or + result = getParent().(RegExpTerm).getRootTerm() + } + + /** + * Holds if this term is part of a string literal + * that is interpreted as a regular expression. + */ + predicate isUsedAsRegExp() { any() } + + /** + * Holds if this is the root term of a regular expression. 
+ */ + predicate isRootTerm() { start = 0 and end = re.getText().length() } + + override RegExpTerm getChild(int i) { + result = this.(RegExpAlt).getChild(i) + or + result = this.(RegExpBackRef).getChild(i) + or + result = this.(RegExpCharacterClass).getChild(i) + or + result = this.(RegExpCharacterRange).getChild(i) + or + result = this.(RegExpNormalChar).getChild(i) + or + result = this.(RegExpGroup).getChild(i) + or + result = this.(RegExpQuantifier).getChild(i) + or + result = this.(RegExpSequence).getChild(i) + or + result = this.(RegExpSpecialChar).getChild(i) + } + + /** + * Gets the parent term of this regular expression term, or the + * regular expression literal if this is the root term. + */ + RegExpParent getParent() { result.getAChild() = this } + + override Regex getRegex() { result = re } + + /** Gets the offset at which this term starts. */ + int getStart() { result = start } + + /** Gets the offset at which this term ends. */ + int getEnd() { result = end } + + override string toString() { result = re.getText().substring(start, end) } + + /** + * Gets the location of the surrounding regex, as locations inside the regex do not exist. + * To get location information corresponding to the term inside the regex, + * use `hasLocationInfo`. + */ + Location getLocation() { result = re.getLocation() } + + /** Holds if this term is found at the specified location offsets. */ + predicate hasLocationInfo( + string filepath, int startline, int startcolumn, int endline, int endcolumn + ) { + exists(int re_start, int re_end | + re.getLocation().hasLocationInfo(filepath, startline, re_start, endline, re_end) and + startcolumn = re_start + start + 4 and + endcolumn = re_start + end + 3 + ) + } + + /** Gets the file in which this term is found. */ + File getFile() { result = this.getLocation().getFile() } + + /** Gets the raw source text of this term. */ + string getRawValue() { result = this.toString() } + + /** Gets the string literal in which this term is found. */ + RegExpLiteral getLiteral() { result = TRegExpLiteral(re) } + + /** Gets the regular expression term that is matched (textually) before this one, if any. */ + RegExpTerm getPredecessor() { + exists(RegExpTerm parent | parent = getParent() | + result = parent.(RegExpSequence).previousElement(this) + or + not exists(parent.(RegExpSequence).previousElement(this)) and + not parent instanceof RegExpSubPattern and + result = parent.getPredecessor() + ) + } + + /** Gets the regular expression term that is matched (textually) after this one, if any. */ + RegExpTerm getSuccessor() { + exists(RegExpTerm parent | parent = getParent() | + result = parent.(RegExpSequence).nextElement(this) + or + not exists(parent.(RegExpSequence).nextElement(this)) and + not parent instanceof RegExpSubPattern and + result = parent.getSuccessor() + ) + } + + /** Gets the primary QL class for this term. */ + string getPrimaryQLClass() { result = "RegExpTerm" } +} + +/** + * A quantified regular expression term. 
+ * + * Example: + * + * ``` + * ((ECMA|Java)[sS]cript)* + * ``` + */ +class RegExpQuantifier extends RegExpTerm, TRegExpQuantifier { + int part_end; + boolean maybe_empty; + boolean may_repeat_forever; + + RegExpQuantifier() { + this = TRegExpQuantifier(re, start, end) and + re.qualifiedPart(start, part_end, end, maybe_empty, may_repeat_forever) + } + + override RegExpTerm getChild(int i) { + i = 0 and + result.getRegex() = re and + result.getStart() = start and + result.getEnd() = part_end + } + + predicate mayRepeatForever() { may_repeat_forever = true } + + string getQualifier() { result = re.getText().substring(part_end, end) } + + override string getPrimaryQLClass() { result = "RegExpQuantifier" } +} + +/** + * A regular expression term that permits unlimited repetitions. + */ +class InfiniteRepetitionQuantifier extends RegExpQuantifier { + InfiniteRepetitionQuantifier() { this.mayRepeatForever() } +} + +/** + * A star-quantified term. + * + * Example: + * + * ``` + * \w* + * ``` + */ +class RegExpStar extends InfiniteRepetitionQuantifier { + RegExpStar() { this.getQualifier().charAt(0) = "*" } + + override string getPrimaryQLClass() { result = "RegExpStar" } +} + +/** + * A plus-quantified term. + * + * Example: + * + * ``` + * \w+ + * ``` + */ +class RegExpPlus extends InfiniteRepetitionQuantifier { + RegExpPlus() { this.getQualifier().charAt(0) = "+" } + + override string getPrimaryQLClass() { result = "RegExpPlus" } +} + +/** + * An optional term. + * + * Example: + * + * ``` + * ;? + * ``` + */ +class RegExpOpt extends RegExpQuantifier { + RegExpOpt() { this.getQualifier().charAt(0) = "?" } + + override string getPrimaryQLClass() { result = "RegExpOpt" } +} + +/** + * A range-quantified term + * + * Examples: + * + * ``` + * \w{2,4} + * \w{2,} + * \w{2} + * ``` + */ +class RegExpRange extends RegExpQuantifier { + string upper; + string lower; + + RegExpRange() { re.multiples(part_end, end, lower, upper) } + + string getUpper() { result = upper } + + string getLower() { result = lower } + + /** + * Gets the upper bound of the range, if any. + * + * If there is no upper bound, any number of repetitions is allowed. + * For a term of the form `r{lo}`, both the lower and the upper bound + * are `lo`. + */ + int getUpperBound() { result = this.getUpper().toInt() } + + /** Gets the lower bound of the range. */ + int getLowerBound() { result = this.getLower().toInt() } + + override string getPrimaryQLClass() { result = "RegExpRange" } +} + +/** + * A sequence term. + * + * Example: + * + * ``` + * (ECMA|Java)Script + * ``` + * + * This is a sequence with the elements `(ECMA|Java)` and `Script`. + */ +class RegExpSequence extends RegExpTerm, TRegExpSequence { + RegExpSequence() { + this = TRegExpSequence(re, start, end) and + exists(seqChild(re, start, end, 1)) // if a sequence does not have more than one element, it should be treated as that element instead. + } + + override RegExpTerm getChild(int i) { result = seqChild(re, start, end, i) } + + /** Gets the element preceding `element` in this sequence. */ + RegExpTerm previousElement(RegExpTerm element) { element = nextElement(result) } + + /** Gets the element following `element` in this sequence. 
*/ + RegExpTerm nextElement(RegExpTerm element) { + exists(int i | + element = this.getChild(i) and + result = this.getChild(i + 1) + ) + } + + override string getPrimaryQLClass() { result = "RegExpSequence" } +} + +// moved out so we can use it in the charpred +private RegExpTerm seqChild(Regex re, int start, int end, int i) { + re.sequence(start, end) and + ( + i = 0 and + result.getRegex() = re and + result.getStart() = start and + exists(int itemEnd | + re.item(start, itemEnd) and + result.getEnd() = itemEnd + ) + or + i > 0 and + result.getRegex() = re and + exists(int itemStart | itemStart = seqChild(re, start, end, i - 1).getEnd() | + result.getStart() = itemStart and + re.item(itemStart, result.getEnd()) + ) + ) +} + +/** + * An alternative term, that is, a term of the form `a|b`. + * + * Example: + * + * ``` + * ECMA|Java + * ``` + */ +class RegExpAlt extends RegExpTerm, TRegExpAlt { + RegExpAlt() { this = TRegExpAlt(re, start, end) } + + override RegExpTerm getChild(int i) { + i = 0 and + result.getRegex() = re and + result.getStart() = start and + exists(int part_end | + re.alternationOption(start, end, start, part_end) and + result.getEnd() = part_end + ) + or + i > 0 and + result.getRegex() = re and + exists(int part_start | + part_start = this.getChild(i - 1).getEnd() + 1 // allow for the | + | + result.getStart() = part_start and + re.alternationOption(start, end, part_start, result.getEnd()) + ) + } + + override string getPrimaryQLClass() { result = "RegExpAlt" } +} + +/** + * An escaped regular expression term, that is, a regular expression + * term starting with a backslash, which is not a backreference. + * + * Example: + * + * ``` + * \. + * \w + * ``` + */ +class RegExpEscape extends RegExpNormalChar { + RegExpEscape() { re.escapedCharacter(start, end) } + + /** + * Gets the name of the escaped; for example, `w` for `\w`. + * TODO: Handle named escapes. + */ + override string getValue() { + this.isIdentityEscape() and result = this.getUnescaped() + or + this.getUnescaped() = "n" and result = "\n" + or + this.getUnescaped() = "r" and result = "\r" + or + this.getUnescaped() = "t" and result = "\t" + or + this.getUnescaped() = "f" and result = " " + or + isUnicode() and + result = getUnicode() + } + + predicate isIdentityEscape() { not this.getUnescaped() in ["n", "r", "t", "f"] } + + override string getPrimaryQLClass() { result = "RegExpEscape" } + + string getUnescaped() { result = this.getText().suffix(1) } + + /** + * Gets the text for this escape. That is e.g. "\w". + */ + private string getText() { result = re.getText().substring(start, end) } + + /** + * Holds if this is a unicode escape. + */ + private predicate isUnicode() { getText().prefix(2) = ["\\u", "\\U"] } + + /** + * Gets the unicode char for this escape. + * E.g. for `\u0061` this returns "a". + */ + private string getUnicode() { + exists(int codepoint | codepoint = sum(getHexValueFromUnicode(_)) | + result = codepoint.toUnicode() + ) + } + + /** + * Gets int value for the `index`th char in the hex number of the unicode escape. + * E.g. for `\u0061` and `index = 2` this returns 96 (the number `6` interpreted as hex). + */ + private int getHexValueFromUnicode(int index) { + isUnicode() and + exists(string hex, string char | hex = getText().suffix(2) | + char = hex.charAt(index) and + result = 16.pow(hex.length() - index - 1) * toHex(char) + ) + } +} + +/** + * Gets the hex number for the `hex` char. + */ +private int toHex(string hex) { + hex = [0 .. 
9].toString() and + result = hex.toInt() + or + result = 10 and hex = ["a", "A"] + or + result = 11 and hex = ["b", "B"] + or + result = 12 and hex = ["c", "C"] + or + result = 13 and hex = ["d", "D"] + or + result = 14 and hex = ["e", "E"] + or + result = 15 and hex = ["f", "F"] +} + +/** + * A character class escape in a regular expression. + * That is, an escaped charachter that denotes multiple characters. + * + * Examples: + * + * ``` + * \w + * \S + * ``` + */ +class RegExpCharacterClassEscape extends RegExpEscape { + // string value; + RegExpCharacterClassEscape() { + // value = re.getText().substring(start + 1, end) and + // value in ["d", "D", "s", "S", "w", "W"] + this.getValue() in ["d", "D", "s", "S", "w", "W"] + } + + /** Gets the name of the character class; for example, `w` for `\w`. */ + // override string getValue() { result = value } + override RegExpTerm getChild(int i) { none() } + + override string getPrimaryQLClass() { result = "RegExpCharacterClassEscape" } +} + +/** + * A character class in a regular expression. + * + * Examples: + * + * ``` + * [a-z_] + * [^<>&] + * ``` + */ +class RegExpCharacterClass extends RegExpTerm, TRegExpCharacterClass { + RegExpCharacterClass() { this = TRegExpCharacterClass(re, start, end) } + + predicate isInverted() { re.getChar(start + 1) = "^" } + + string getCharThing(int i) { result = re.getChar(i + start) } + + predicate isUniversalClass() { + // [^] + isInverted() and not exists(getAChild()) + or + // [\w\W] and similar + not isInverted() and + exists(string cce1, string cce2 | + cce1 = getAChild().(RegExpCharacterClassEscape).getValue() and + cce2 = getAChild().(RegExpCharacterClassEscape).getValue() + | + cce1 != cce2 and cce1.toLowerCase() = cce2.toLowerCase() + ) + } + + override RegExpTerm getChild(int i) { + i = 0 and + result.getRegex() = re and + exists(int itemStart, int itemEnd | + result.getStart() = itemStart and + re.char_set_start(start, itemStart) and + re.char_set_child(start, itemStart, itemEnd) and + result.getEnd() = itemEnd + ) + or + i > 0 and + result.getRegex() = re and + exists(int itemStart | itemStart = this.getChild(i - 1).getEnd() | + result.getStart() = itemStart and + re.char_set_child(start, itemStart, result.getEnd()) + ) + } + + override string getPrimaryQLClass() { result = "RegExpCharacterClass" } +} + +/** + * A character range in a character class in a regular expression. + * + * Example: + * + * ``` + * a-z + * ``` + */ +class RegExpCharacterRange extends RegExpTerm, TRegExpCharacterRange { + int lower_end; + int upper_start; + + RegExpCharacterRange() { + this = TRegExpCharacterRange(re, start, end) and + re.charRange(_, start, lower_end, upper_start, end) + } + + predicate isRange(string lo, string hi) { + lo = re.getText().substring(start, lower_end) and + hi = re.getText().substring(upper_start, end) + } + + override RegExpTerm getChild(int i) { + i = 0 and + result.getRegex() = re and + result.getStart() = start and + result.getEnd() = lower_end + or + i = 1 and + result.getRegex() = re and + result.getStart() = upper_start and + result.getEnd() = end + } + + override string getPrimaryQLClass() { result = "RegExpCharacterRange" } +} + +/** + * A normal character in a regular expression, that is, a character + * without special meaning. This includes escaped characters. 
+ * + * Examples: + * ``` + * t + * \t + * ``` + */ +class RegExpNormalChar extends RegExpTerm, TRegExpNormalChar { + RegExpNormalChar() { this = TRegExpNormalChar(re, start, end) } + + predicate isCharacter() { any() } + + string getValue() { result = re.getText().substring(start, end) } + + override RegExpTerm getChild(int i) { none() } + + override string getPrimaryQLClass() { result = "RegExpNormalChar" } +} + +/** + * A constant regular expression term, that is, a regular expression + * term matching a single string. Currently, this will always be a single character. + * + * Example: + * + * ``` + * a + * ``` + */ +class RegExpConstant extends RegExpTerm { + string value; + + RegExpConstant() { + this = TRegExpNormalChar(re, start, end) and + not this instanceof RegExpCharacterClassEscape and + // exclude chars in qualifiers + // TODO: push this into regex library + not exists(int qstart, int qend | re.qualifiedPart(_, qstart, qend, _, _) | + qstart <= start and end <= qend + ) and + value = this.(RegExpNormalChar).getValue() + // This will never hold + // or + // this = TRegExpSpecialChar(re, start, end) and + // re.inCharSet(start) and + // value = this.(RegExpSpecialChar).getChar() + } + + predicate isCharacter() { any() } + + string getValue() { result = value } + + override RegExpTerm getChild(int i) { none() } + + override string getPrimaryQLClass() { result = "RegExpConstant" } +} + +/** + * A grouped regular expression. + * + * Examples: + * + * ``` + * (ECMA|Java) + * (?:ECMA|Java) + * (?['"]) + * ``` + */ +class RegExpGroup extends RegExpTerm, TRegExpGroup { + RegExpGroup() { this = TRegExpGroup(re, start, end) } + + /** + * Gets the index of this capture group within the enclosing regular + * expression literal. + * + * For example, in the regular expression `/((a?).)(?:b)/`, the + * group `((a?).)` has index 1, the group `(a?)` nested inside it + * has index 2, and the group `(?:b)` has no index, since it is + * not a capture group. + */ + int getNumber() { result = re.getGroupNumber(start, end) } + + /** Holds if this is a named capture group. */ + predicate isNamed() { exists(this.getName()) } + + /** Gets the name of this capture group, if any. */ + string getName() { result = re.getGroupName(start, end) } + + predicate isCharacter() { any() } + + string getValue() { result = re.getText().substring(start, end) } + + override RegExpTerm getChild(int i) { + result.getRegex() = re and + i = 0 and + re.groupContents(start, end, result.getStart(), result.getEnd()) + } + + override string getPrimaryQLClass() { result = "RegExpGroup" } +} + +/** + * A special character in a regular expression. + * + * Examples: + * ``` + * ^ + * $ + * . + * ``` + */ +class RegExpSpecialChar extends RegExpTerm, TRegExpSpecialChar { + string char; + + RegExpSpecialChar() { + this = TRegExpSpecialChar(re, start, end) and + re.specialCharacter(start, end, char) + } + + predicate isCharacter() { any() } + + string getChar() { result = char } + + override RegExpTerm getChild(int i) { none() } + + override string getPrimaryQLClass() { result = "RegExpSpecialChar" } +} + +/** + * A dot regular expression. + * + * Example: + * + * ``` + * . + * ``` + */ +class RegExpDot extends RegExpSpecialChar { + RegExpDot() { this.getChar() = "." } + + override string getPrimaryQLClass() { result = "RegExpDot" } +} + +/** + * A dollar assertion `$` matching the end of a line. 
+ * + * Example: + * + * ``` + * $ + * ``` + */ +class RegExpDollar extends RegExpSpecialChar { + RegExpDollar() { this.getChar() = "$" } + + override string getPrimaryQLClass() { result = "RegExpDollar" } +} + +/** + * A caret assertion `^` matching the beginning of a line. + * + * Example: + * + * ``` + * ^ + * ``` + */ +class RegExpCaret extends RegExpSpecialChar { + RegExpCaret() { this.getChar() = "^" } + + override string getPrimaryQLClass() { result = "RegExpCaret" } +} + +/** + * A zero-width match, that is, either an empty group or an assertion. + * + * Examples: + * ``` + * () + * (?=\w) + * ``` + */ +class RegExpZeroWidthMatch extends RegExpGroup { + RegExpZeroWidthMatch() { re.zeroWidthMatch(start, end) } + + override predicate isCharacter() { any() } + + override RegExpTerm getChild(int i) { none() } + + override string getPrimaryQLClass() { result = "RegExpZeroWidthMatch" } +} + +/** + * A zero-width lookahead or lookbehind assertion. + * + * Examples: + * + * ``` + * (?=\w) + * (?!\n) + * (?<=\.) + * (?` + * in a regular expression. + * + * Examples: + * + * ``` + * \1 + * (?P=quote) + * ``` + */ +class RegExpBackRef extends RegExpTerm, TRegExpBackRef { + RegExpBackRef() { this = TRegExpBackRef(re, start, end) } + + /** + * Gets the number of the capture group this back reference refers to, if any. + */ + int getNumber() { result = re.getBackrefNumber(start, end) } + + /** + * Gets the name of the capture group this back reference refers to, if any. + */ + string getName() { result = re.getBackrefName(start, end) } + + /** Gets the capture group this back reference refers to. */ + RegExpGroup getGroup() { + result.getLiteral() = this.getLiteral() and + ( + result.getNumber() = this.getNumber() or + result.getName() = this.getName() + ) + } + + override RegExpTerm getChild(int i) { none() } + + override string getPrimaryQLClass() { result = "RegExpBackRef" } +} + +/** Gets the parse tree resulting from parsing `re`, if such has been constructed. */ +RegExpTerm getParsedRegExp(StrConst re) { result.getRegex() = re and result.isRootTerm() } From 2c27ce7aa5d8bffb83dbfee18cf564a78a47f289 Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Mon, 28 Jun 2021 16:26:44 +0200 Subject: [PATCH 05/22] Python: Make ast viewer see regexes This work is due to @erik-krogh who also - made corresponding fixes to `RegexTreeView.qll` - implemented `toUnicode` so it is available on `String`s --- python/ql/src/semmle/python/PrintAst.qll | 42 +++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/python/ql/src/semmle/python/PrintAst.qll b/python/ql/src/semmle/python/PrintAst.qll index 63ec5b53d0a2..e06e8bc9ffb4 100644 --- a/python/ql/src/semmle/python/PrintAst.qll +++ b/python/ql/src/semmle/python/PrintAst.qll @@ -7,6 +7,7 @@ */ import python +import semmle.python.RegexTreeView private newtype TPrintAstConfiguration = MkPrintAstConfiguration() @@ -53,6 +54,9 @@ private newtype TPrintAstNode = not list = any(Module mod).getBody() and not forall(AstNode child | child = list.getAnItem() | isNotNeeded(child)) and exists(list.getAnItem()) + } or + TRegExpTermNode(RegExpTerm term) { + exists(StrConst str | term.getRootTerm() = getParsedRegExp(str) and shouldPrint(str, _)) } /** @@ -419,6 +423,42 @@ class ParameterNode extends AstElementNode { } } +/** + * A print node for a `StrConst`. + * + * The string has a child, if the child is used as a regular expression, + * which is the root of the regular expression. 
+ */ +class StrConstNode extends AstElementNode { + override StrConst element; + + override PrintAstNode getChild(int childIndex) { + childIndex = 0 and result.(RegExpTermNode).getTerm() = getParsedRegExp(element) + } +} + +/** + * A print node for a regular expression term. + */ +class RegExpTermNode extends TRegExpTermNode, PrintAstNode { + RegExpTerm term; + + RegExpTermNode() { this = TRegExpTermNode(term) } + + /** Gets the `RegExpTerm` for this node. */ + RegExpTerm getTerm() { result = term } + + override PrintAstNode getChild(int childIndex) { + result.(RegExpTermNode).getTerm() = term.getChild(childIndex) + } + + override string toString() { + result = "[" + strictconcat(term.getPrimaryQLClass(), " | ") + "] " + term.toString() + } + + override Location getLocation() { result = term.getLocation() } +} + /** * Gets the `i`th child from `node` ordered by location. */ @@ -447,7 +487,7 @@ private module PrettyPrinting { string getQlClass(AstNode a) { shouldPrint(a, _) and ( - not exists(getQlCustomClass(a)) and result = a.toString() + not exists(getQlCustomClass(a)) and result = strictconcat(a.toString(), " | ") or result = strictconcat(getQlCustomClass(a), " | ") ) From d2eeaff441b6dc5c847c324d5d08e61a2e6b518b Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Mon, 28 Jun 2021 16:44:54 +0200 Subject: [PATCH 06/22] JS: Refactor ReDoS to make files sharable the extra ordering conditions in ReDoSUtil will be needed for the Python implementation. --- javascript/ql/src/Performance/ReDoS.ql | 323 +---------------- .../performance/ExponentialBackTracking.qll | 342 ++++++++++++++++++ .../security/performance/ReDoSUtil.qll | 16 +- .../security/performance/RegExpTreeView.qll | 14 + .../performance/SuperlinearBackTracking.qll | 1 - 5 files changed, 368 insertions(+), 328 deletions(-) create mode 100644 javascript/ql/src/semmle/javascript/security/performance/ExponentialBackTracking.qll create mode 100644 javascript/ql/src/semmle/javascript/security/performance/RegExpTreeView.qll diff --git a/javascript/ql/src/Performance/ReDoS.ql b/javascript/ql/src/Performance/ReDoS.ql index 1f712810a50c..bda773bf0e7d 100644 --- a/javascript/ql/src/Performance/ReDoS.ql +++ b/javascript/ql/src/Performance/ReDoS.ql @@ -16,328 +16,7 @@ import javascript import semmle.javascript.security.performance.ReDoSUtil - -/* - * This query implements the analysis described in the following two papers: - * - * James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for - * Regular Expression Denial-of-Service Attacks. NSS 2013. - * (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf) - * Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression - * Exponential Runtime via Substructural Logics. 2014. - * (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf) - * - * The basic idea is to search for overlapping cycles in the NFA, that is, - * states `q` such that there are two distinct paths from `q` to itself - * that consume the same word `w`. - * - * For any such state `q`, an attack string can be constructed as follows: - * concatenate a prefix `v` that takes the NFA to `q` with `n` copies of - * the word `w` that leads back to `q` along two different paths, followed - * by a suffix `x` that is _not_ accepted in state `q`. A backtracking - * implementation will need to explore at least 2^n different ways of going - * from `q` back to itself while trying to match the `n` copies of `w` - * before finally giving up. 
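The attack described in the comment above (a prefix reaching `q`, `n` copies of the pump word `w`, then a rejecting suffix) is easy to reproduce with Python's backtracking `re` engine. The sketch below is only a demonstration of the effect, not part of the analysis; absolute timings vary by machine, but the failing match time should roughly double for each extra pump character:

```
import re
import time

# `(a*)*b` against a string of a's with no `b`: every way of splitting the
# a's between the inner and outer star is explored before the match fails.
pattern = re.compile(r"(a*)*b")

for n in range(16, 24):
    start = time.perf_counter()
    assert pattern.match("a" * n) is None   # the rejecting "suffix" is simply the end of input
    elapsed = time.perf_counter() - start
    print(f"n={n:2d}  {elapsed:.3f}s")      # expect roughly a doubling per extra 'a'
```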
- * - * Now in order to identify overlapping cycles, all we have to do is find - * pumpable forks, that is, states `q` that can transition to two different - * states `r1` and `r2` on the same input symbol `c`, such that there are - * paths from both `r1` and `r2` to `q` that consume the same word. The latter - * condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)` - * in the product NFA. - * - * This is what the query does. It makes a simple attempt to construct a - * prefix `v` leading into `q`, but only to improve the alert message. - * And the query tries to prove the existence of a suffix that ensures - * rejection. This check might fail, which can cause false positives. - * - * Finally, sometimes it depends on the translation whether the NFA generated - * for a regular expression has a pumpable fork or not. We implement one - * particular translation, which may result in false positives or negatives - * relative to some particular JavaScript engine. - * - * More precisely, the query constructs an NFA from a regular expression `r` - * as follows: - * - * * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing - * the state of the automaton before attempting to match the `i`th character in `t`. - * * There is one accepting state `Accept(r)`. - * * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string - * by using an epsilon transition to `Accept(r)` and an any transition to itself. - * * Transitions between states may be labelled with epsilon, or an abstract - * input symbol. - * * Each abstract input symbol represents a set of concrete input characters: - * either a single character, a set of characters represented by a - * character class, or the set of all characters. - * * The product automaton is constructed lazily, starting with pair states - * `(q, q)` where `q` is a fork, and proceding along an over-approximate - * step relation. - * * The over-approximate step relation allows transitions along pairs of - * abstract input symbols where the symbols have overlap in the characters they accept. - * * Once a trace of pairs of abstract input symbols that leads from a fork - * back to itself has been identified, we attempt to construct a concrete - * string corresponding to it, which may fail. - * * Lastly we ensure that any state reached by repeating `n` copies of `w` has - * a suffix `x` (possible empty) that is most likely __not__ accepted. - */ - -/** - * Holds if state `s` might be inside a backtracking repetition. - */ -pragma[noinline] -predicate stateInsideBacktracking(State s) { - s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition -} - -/** - * A infinitely repeating quantifier that might backtrack. - */ -class MaybeBacktrackingRepetition extends InfiniteRepetitionQuantifier { - MaybeBacktrackingRepetition() { - exists(RegExpTerm child | - child instanceof RegExpAlt or - child instanceof RegExpQuantifier - | - child.getParent+() = this - ) - } -} - -/** - * A state in the product automaton. - * - * We lazily only construct those states that we are actually - * going to need: `(q, q)` for every fork state `q`, and any - * pair of states that can be reached from a pair that we have - * already constructed. To cut down on the number of states, - * we only represent states `(q1, q2)` where `q1` is lexicographically - * no bigger than `q2`. - * - * States are only constructed if both states in the pair are - * inside a repetition that might backtrack. 
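To make the pumpable-fork condition concrete, here is a small self-contained Python toy (a model of the idea only, not the QL implementation): it builds the product of a hand-written NFA with itself and checks whether the pair `(q, q)` is reachable from two distinct successors `(r1, r2)` of a fork state `q`, i.e. whether two different paths from `q` back to `q` consume the same word.

```
from collections import deque

# Toy epsilon-free NFA for `(a|aa)*b`:
#   q --a--> q     (the `a` branch)
#   q --a--> m     (first `a` of the `aa` branch)
#   m --a--> q     (second `a` of the `aa` branch)
#   q --b--> acc
delta = {
    ("q", "a"): {"q", "m"},
    ("m", "a"): {"q"},
    ("q", "b"): {"acc"},
}

def succ(state, char):
    return delta.get((state, char), set())

def pumpable_fork(fork):
    """Return a word `w` witnessing two distinct paths from `fork` back to
    itself that both consume `w`, or None if there is no overlapping cycle."""
    alphabet = {c for (_, c) in delta}
    # A fork: the same character leads to two different successor states.
    starts = [((r1, r2), c)
              for c in alphabet
              for r1 in succ(fork, c)
              for r2 in succ(fork, c)
              if r1 != r2]
    # BFS in the product automaton, looking for the pair (fork, fork).
    queue = deque(starts)
    seen = {pair for pair, _ in starts}
    while queue:
        (s, t), word = queue.popleft()
        if (s, t) == (fork, fork):
            return word
        for c in alphabet:
            for s2 in succ(s, c):
                for t2 in succ(t, c):
                    if (s2, t2) not in seen:
                        seen.add((s2, t2))
                        queue.append(((s2, t2), word + c))
    return None

print(pumpable_fork("q"))   # prints "aa": two distinct q->q paths consume "aa"
```

The fork here is the choice, on reading an `a`, between finishing the one-character branch and starting the two-character branch; repeating the pump word "aa" then gives exponentially many ways to match, which is exactly the overlapping-cycle situation the analysis searches for.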
- */ -newtype TStatePair = - MkStatePair(State q1, State q2) { - isFork(q1, _, _, _, _) and q2 = q1 - or - (step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and - rankState(q1) <= rankState(q2) - } - -/** - * Gets a unique number for a `state`. - * Is used to create an ordering of states, where states with the same `toString()` will be ordered differently. - */ -int rankState(State state) { - state = - rank[result](State s, Location l | - l = s.getRepr().getLocation() - | - s order by l.getStartLine(), l.getStartColumn(), s.toString() - ) -} - -class StatePair extends TStatePair { - State q1; - State q2; - - StatePair() { this = MkStatePair(q1, q2) } - - string toString() { result = "(" + q1 + ", " + q2 + ")" } - - State getLeft() { result = q1 } - - State getRight() { result = q2 } -} - -predicate isStatePair(StatePair p) { any() } - -predicate delta2(StatePair q, StatePair r) { step(q, _, _, r) } - -/** - * Gets the minimum length of a path from `q` to `r` in the - * product automaton. - */ -int statePairDist(StatePair q, StatePair r) = - shortestDistances(isStatePair/1, delta2/2)(q, r, result) - -/** - * Holds if there are transitions from `q` to `r1` and from `q` to `r2` - * labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not - * trivially have an empty intersection. - * - * This predicate only holds for states associated with regular expressions - * that have at least one repetition quantifier in them (otherwise the - * expression cannot be vulnerable to ReDoS attacks anyway). - */ -pragma[noopt] -predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) { - stateInsideBacktracking(q) and - exists(State q1, State q2 | - q1 = epsilonSucc*(q) and - delta(q1, s1, r1) and - q2 = epsilonSucc*(q) and - delta(q2, s2, r2) and - // Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join. - // From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals, - // and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions. - exists(intersect(s1, s2)) - | - s1 != s2 - or - r1 != r2 - or - r1 = r2 and q1 != q2 - or - // If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state: - // one that uses the loop and one that doesn't. The engine will separately attempt to match with each path, - // despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not. - // To avoid every state in the loop becoming a fork state, - // we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop - // (every epsilon-loop must contain such a state). - // - // We additionally require that the there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself. - // This is done to avoid flagging regular expressions such as `/(a?)*b/` - that only has polynomial runtime, and is detected by `js/polynomial-redos`. - // The below code is therefore a heuritic, that only flags regular expressions such as `/(a*)*b/`, - // and does not flag regular expressions such as `/(a?b?)c/`, but the latter pattern is not used frequently. 
- r1 = r2 and - q1 = q2 and - epsilonSucc+(q) = q and - exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and - // One of the mid states is an infinite quantifier itself - exists(State mid, RegExpTerm term | - mid = epsilonSucc+(q) and - term = mid.getRepr() and - term instanceof InfiniteRepetitionQuantifier and - q = epsilonSucc+(mid) and - not mid = q - ) - ) and - stateInsideBacktracking(r1) and - stateInsideBacktracking(r2) -} - -/** - * Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only - * one or the other is defined. - */ -StatePair mkStatePair(State q1, State q2) { - result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1) -} - -/** - * Holds if there are transitions from the components of `q` to the corresponding - * components of `r` labelled with `s1` and `s2`, respectively. - */ -predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) { - exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2)) -} - -/** - * Holds if there are transitions from the components of `q` to `r1` and `r2` - * labelled with `s1` and `s2`, respectively. - * - * We only consider transitions where the resulting states `(r1, r2)` are both - * inside a repetition that might backtrack. - */ -pragma[noopt] -predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) { - exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 | - deltaClosed(q1, s1, r1) and - deltaClosed(q2, s2, r2) and - // use noopt to force the join on `intersect` to happen last. - exists(intersect(s1, s2)) - ) and - stateInsideBacktracking(r1) and - stateInsideBacktracking(r2) -} - -private newtype TTrace = - Nil() or - Step(InputSymbol s1, InputSymbol s2, TTrace t) { - exists(StatePair p | - isReachableFromFork(_, p, t, _) and - step(p, s1, s2, _) - ) - or - t = Nil() and isFork(_, s1, s2, _, _) - } - -/** - * A list of pairs of input symbols that describe a path in the product automaton - * starting from some fork state. - */ -class Trace extends TTrace { - string toString() { - this = Nil() and result = "Nil()" - or - exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) | - result = "Step(" + s1 + ", " + s2 + ", " + t + ")" - ) - } -} - -/** - * Gets a string corresponding to the trace `t`. - */ -string concretise(Trace t) { - t = Nil() and result = "" - or - exists(InputSymbol s1, InputSymbol s2, Trace rest | t = Step(s1, s2, rest) | - result = concretise(rest) + intersect(s1, s2) - ) -} - -/** - * Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is - * a path from `r` back to `(fork, fork)` with `rem` steps. - */ -predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) { - // base case - exists(InputSymbol s1, InputSymbol s2, State q1, State q2 | - isFork(fork, s1, s2, q1, q2) and - r = MkStatePair(q1, q2) and - w = Step(s1, s2, Nil()) and - rem = statePairDist(r, MkStatePair(fork, fork)) - ) - or - // recursive case - exists(StatePair p, Trace v, InputSymbol s1, InputSymbol s2 | - isReachableFromFork(fork, p, v, rem + 1) and - step(p, s1, s2, r) and - w = Step(s1, s2, v) and - rem >= statePairDist(r, MkStatePair(fork, fork)) - ) -} - -/** - * Gets a state in the product automaton from which `(fork, fork)` is - * reachable in zero or more epsilon transitions. 
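`concretise` above turns a trace of symbol pairs into a concrete pump string by picking, at each step, some character in the intersection of the two abstract input symbols. A stand-alone Python sketch of that construction, with abstract symbols modelled simply as sets of characters (an illustration of the idea, not the QL code, which also has to handle `.`, inverted classes and so on):

```
# Abstract input symbols as plain character sets.
DIGIT = set("0123456789")
WORD  = set("abcdefghijklmnopqrstuvwxyz0123456789_")
LOWER = set("abcdefghijklmnopqrstuvwxyz")

def intersect(s1, s2):
    """Pick some character matched by both symbols, if any."""
    common = s1 & s2
    return min(common) if common else None

def concretise(trace):
    """Build a concrete string from a trace of symbol pairs, one character per step."""
    word = ""
    for s1, s2 in trace:
        c = intersect(s1, s2)
        if c is None:
            return None   # the trace cannot be realised by any concrete string
        word += c
    return word

# A two-step trace where both copies of the automaton can take each step:
print(concretise([(DIGIT, WORD), (WORD, LOWER)]))   # e.g. "0a"
```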
- */ -StatePair getAForkPair(State fork) { - isFork(fork, _, _, _, _) and - result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork)) -} - -/** - * Holds if `fork` is a pumpable fork with word `w`. - */ -predicate isPumpable(State fork, string w) { - exists(StatePair q, Trace t | - isReachableFromFork(fork, q, t, _) and - q = getAForkPair(fork) and - w = concretise(t) - ) -} - -/** - * An instantiation of `ReDoSConfiguration` for exponential backtracking. - */ -class ExponentialReDoSConfiguration extends ReDoSConfiguration { - ExponentialReDoSConfiguration() { this = "ExponentialReDoSConfiguration" } - - override predicate isReDoSCandidate(State state, string pump) { isPumpable(state, pump) } -} +import semmle.javascript.security.performance.ExponentialBackTracking from RegExpTerm t, string pump, State s, string prefixMsg where hasReDoSResult(t, pump, s, prefixMsg) diff --git a/javascript/ql/src/semmle/javascript/security/performance/ExponentialBackTracking.qll b/javascript/ql/src/semmle/javascript/security/performance/ExponentialBackTracking.qll new file mode 100644 index 000000000000..61707b1f392f --- /dev/null +++ b/javascript/ql/src/semmle/javascript/security/performance/ExponentialBackTracking.qll @@ -0,0 +1,342 @@ +/** + * This library implements the analysis described in the following two papers: + * + * James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for + * Regular Expression Denial-of-Service Attacks. NSS 2013. + * (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf) + * Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression + * Exponential Runtime via Substructural Logics. 2014. + * (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf) + * + * The basic idea is to search for overlapping cycles in the NFA, that is, + * states `q` such that there are two distinct paths from `q` to itself + * that consume the same word `w`. + * + * For any such state `q`, an attack string can be constructed as follows: + * concatenate a prefix `v` that takes the NFA to `q` with `n` copies of + * the word `w` that leads back to `q` along two different paths, followed + * by a suffix `x` that is _not_ accepted in state `q`. A backtracking + * implementation will need to explore at least 2^n different ways of going + * from `q` back to itself while trying to match the `n` copies of `w` + * before finally giving up. + * + * Now in order to identify overlapping cycles, all we have to do is find + * pumpable forks, that is, states `q` that can transition to two different + * states `r1` and `r2` on the same input symbol `c`, such that there are + * paths from both `r1` and `r2` to `q` that consume the same word. The latter + * condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)` + * in the product NFA. + * + * This is what the library does. It makes a simple attempt to construct a + * prefix `v` leading into `q`, but only to improve the alert message. + * And the library tries to prove the existence of a suffix that ensures + * rejection. This check might fail, which can cause false positives. + * + * Finally, sometimes it depends on the translation whether the NFA generated + * for a regular expression has a pumpable fork or not. We implement one + * particular translation, which may result in false positives or negatives + * relative to some particular JavaScript engine. 
+ * + * More precisely, the library constructs an NFA from a regular expression `r` + * as follows: + * + * * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing + * the state of the automaton before attempting to match the `i`th character in `t`. + * * There is one accepting state `Accept(r)`. + * * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string + * by using an epsilon transition to `Accept(r)` and an any transition to itself. + * * Transitions between states may be labelled with epsilon, or an abstract + * input symbol. + * * Each abstract input symbol represents a set of concrete input characters: + * either a single character, a set of characters represented by a + * character class, or the set of all characters. + * * The product automaton is constructed lazily, starting with pair states + * `(q, q)` where `q` is a fork, and proceding along an over-approximate + * step relation. + * * The over-approximate step relation allows transitions along pairs of + * abstract input symbols where the symbols have overlap in the characters they accept. + * * Once a trace of pairs of abstract input symbols that leads from a fork + * back to itself has been identified, we attempt to construct a concrete + * string corresponding to it, which may fail. + * * Lastly we ensure that any state reached by repeating `n` copies of `w` has + * a suffix `x` (possible empty) that is most likely __not__ accepted. + */ + +import ReDoSUtil + +/** + * Holds if state `s` might be inside a backtracking repetition. + */ +pragma[noinline] +predicate stateInsideBacktracking(State s) { + s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition +} + +/** + * A infinitely repeating quantifier that might backtrack. + */ +class MaybeBacktrackingRepetition extends InfiniteRepetitionQuantifier { + MaybeBacktrackingRepetition() { + exists(RegExpTerm child | + child instanceof RegExpAlt or + child instanceof RegExpQuantifier + | + child.getParent+() = this + ) + } +} + +/** + * A state in the product automaton. + */ +newtype TStatePair = + /** + * We lazily only construct those states that we are actually + * going to need: `(q, q)` for every fork state `q`, and any + * pair of states that can be reached from a pair that we have + * already constructed. To cut down on the number of states, + * we only represent states `(q1, q2)` where `q1` is lexicographically + * no bigger than `q2`. + * + * States are only constructed if both states in the pair are + * inside a repetition that might backtrack. + */ + MkStatePair(State q1, State q2) { + isFork(q1, _, _, _, _) and q2 = q1 + or + (step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and + rankState(q1) <= rankState(q2) + } + +/** + * Gets a unique number for a `state`. + * Is used to create an ordering of states, where states with the same `toString()` will be ordered differently. + */ +int rankState(State state) { + state = + rank[result](State s, Location l | + l = s.getRepr().getLocation() + | + s order by l.getStartLine(), l.getStartColumn(), s.toString() + ) +} + +/** + * A state in the product automaton. + */ +class StatePair extends TStatePair { + State q1; + State q2; + + StatePair() { this = MkStatePair(q1, q2) } + + /** Gets a textual representation of this element. */ + string toString() { result = "(" + q1 + ", " + q2 + ")" } + + /** Gets the first component of the state pair. */ + State getLeft() { result = q1 } + + /** Gets the second component of the state pair. 
*/ + State getRight() { result = q2 } +} + +/** + * Holds for all constructed state pairs. + * + * Used in `statePairDist` + */ +predicate isStatePair(StatePair p) { any() } + +/** + * Holds if there are transitions from the components of `q` to the corresponding + * components of `r`. + * + * Used in `statePairDist` + */ +predicate delta2(StatePair q, StatePair r) { step(q, _, _, r) } + +/** + * Gets the minimum length of a path from `q` to `r` in the + * product automaton. + */ +int statePairDist(StatePair q, StatePair r) = + shortestDistances(isStatePair/1, delta2/2)(q, r, result) + +/** + * Holds if there are transitions from `q` to `r1` and from `q` to `r2` + * labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not + * trivially have an empty intersection. + * + * This predicate only holds for states associated with regular expressions + * that have at least one repetition quantifier in them (otherwise the + * expression cannot be vulnerable to ReDoS attacks anyway). + */ +pragma[noopt] +predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) { + stateInsideBacktracking(q) and + exists(State q1, State q2 | + q1 = epsilonSucc*(q) and + delta(q1, s1, r1) and + q2 = epsilonSucc*(q) and + delta(q2, s2, r2) and + // Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join. + // From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals, + // and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions. + exists(intersect(s1, s2)) + | + s1 != s2 + or + r1 != r2 + or + r1 = r2 and q1 != q2 + or + // If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state: + // one that uses the loop and one that doesn't. The engine will separately attempt to match with each path, + // despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not. + // To avoid every state in the loop becoming a fork state, + // we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop + // (every epsilon-loop must contain such a state). + // + // We additionally require that the there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself. + // This is done to avoid flagging regular expressions such as `/(a?)*b/` - that only has polynomial runtime, and is detected by `js/polynomial-redos`. + // The below code is therefore a heuritic, that only flags regular expressions such as `/(a*)*b/`, + // and does not flag regular expressions such as `/(a?b?)c/`, but the latter pattern is not used frequently. + r1 = r2 and + q1 = q2 and + epsilonSucc+(q) = q and + exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and + // One of the mid states is an infinite quantifier itself + exists(State mid, RegExpTerm term | + mid = epsilonSucc+(q) and + term = mid.getRepr() and + term instanceof InfiniteRepetitionQuantifier and + q = epsilonSucc+(mid) and + not mid = q + ) + ) and + stateInsideBacktracking(r1) and + stateInsideBacktracking(r2) +} + +/** + * Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only + * one or the other is defined. 
+ */ +StatePair mkStatePair(State q1, State q2) { + result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1) +} + +/** + * Holds if there are transitions from the components of `q` to the corresponding + * components of `r` labelled with `s1` and `s2`, respectively. + */ +predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) { + exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2)) +} + +/** + * Holds if there are transitions from the components of `q` to `r1` and `r2` + * labelled with `s1` and `s2`, respectively. + * + * We only consider transitions where the resulting states `(r1, r2)` are both + * inside a repetition that might backtrack. + */ +pragma[noopt] +predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) { + exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 | + deltaClosed(q1, s1, r1) and + deltaClosed(q2, s2, r2) and + // use noopt to force the join on `intersect` to happen last. + exists(intersect(s1, s2)) + ) and + stateInsideBacktracking(r1) and + stateInsideBacktracking(r2) +} + +private newtype TTrace = + Nil() or + Step(InputSymbol s1, InputSymbol s2, TTrace t) { + exists(StatePair p | + isReachableFromFork(_, p, t, _) and + step(p, s1, s2, _) + ) + or + t = Nil() and isFork(_, s1, s2, _, _) + } + +/** + * A list of pairs of input symbols that describe a path in the product automaton + * starting from some fork state. + */ +class Trace extends TTrace { + /** Gets a textual representation of this element. */ + string toString() { + this = Nil() and result = "Nil()" + or + exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) | + result = "Step(" + s1 + ", " + s2 + ", " + t + ")" + ) + } +} + +/** + * Gets a string corresponding to the trace `t`. + */ +string concretise(Trace t) { + t = Nil() and result = "" + or + exists(InputSymbol s1, InputSymbol s2, Trace rest | t = Step(s1, s2, rest) | + result = concretise(rest) + intersect(s1, s2) + ) +} + +/** + * Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is + * a path from `r` back to `(fork, fork)` with `rem` steps. + */ +predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) { + // base case + exists(InputSymbol s1, InputSymbol s2, State q1, State q2 | + isFork(fork, s1, s2, q1, q2) and + r = MkStatePair(q1, q2) and + w = Step(s1, s2, Nil()) and + rem = statePairDist(r, MkStatePair(fork, fork)) + ) + or + // recursive case + exists(StatePair p, Trace v, InputSymbol s1, InputSymbol s2 | + isReachableFromFork(fork, p, v, rem + 1) and + step(p, s1, s2, r) and + w = Step(s1, s2, v) and + rem >= statePairDist(r, MkStatePair(fork, fork)) + ) +} + +/** + * Gets a state in the product automaton from which `(fork, fork)` is + * reachable in zero or more epsilon transitions. + */ +StatePair getAForkPair(State fork) { + isFork(fork, _, _, _, _) and + result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork)) +} + +/** + * Holds if `fork` is a pumpable fork with word `w`. + */ +predicate isPumpable(State fork, string w) { + exists(StatePair q, Trace t | + isReachableFromFork(fork, q, t, _) and + q = getAForkPair(fork) and + w = concretise(t) + ) +} + +/** + * An instantiation of `ReDoSConfiguration` for exponential backtracking. 
+ */ +class ExponentialReDoSConfiguration extends ReDoSConfiguration { + ExponentialReDoSConfiguration() { this = "ExponentialReDoSConfiguration" } + + override predicate isReDoSCandidate(State state, string pump) { isPumpable(state, pump) } +} diff --git a/javascript/ql/src/semmle/javascript/security/performance/ReDoSUtil.qll b/javascript/ql/src/semmle/javascript/security/performance/ReDoSUtil.qll index a6f91c595dd0..40b05825bc6d 100644 --- a/javascript/ql/src/semmle/javascript/security/performance/ReDoSUtil.qll +++ b/javascript/ql/src/semmle/javascript/security/performance/ReDoSUtil.qll @@ -12,7 +12,7 @@ * states that will cause backtracking (a rejecting suffix exists). */ -import javascript +import RegExpTreeView /** * A configuration for which parts of a regular expression should be considered relevant for @@ -100,8 +100,8 @@ class RegExpRoot extends RegExpTerm { not exists(RegExpLookbehind lbh | getRoot(lbh) = this) and // is actually used as a RegExp isUsedAsRegExp() and - // pragmatic performance optimization: ignore minified files. - not getRootTerm().getParent().(Expr).getTopLevel().isMinified() + // not excluded for library specific reasons + not isExcluded(getRootTerm().getParent()) } } @@ -725,7 +725,10 @@ private module PrefixConstruction { max(State s, Location l | isStartState(s) and getRoot(s.getRepr()) = root and l = s.getRepr().getLocation() | - s order by l.getStartLine(), l.getStartColumn() + s + order by + l.getStartLine(), l.getStartColumn(), s.getRepr().toString(), l.getEndColumn(), + l.getEndLine() ) ) } @@ -767,7 +770,10 @@ private module PrefixConstruction { loc = s.getRepr().getLocation() and delta(s, _, state) | - s order by loc.getStartLine(), loc.getStartColumn(), loc.getEndLine(), loc.getEndColumn() + s + order by + loc.getStartLine(), loc.getStartColumn(), loc.getEndLine(), loc.getEndColumn(), + s.getRepr().toString() ) | // greedy search for the shortest prefix diff --git a/javascript/ql/src/semmle/javascript/security/performance/RegExpTreeView.qll b/javascript/ql/src/semmle/javascript/security/performance/RegExpTreeView.qll new file mode 100644 index 000000000000..f730f62f5b8a --- /dev/null +++ b/javascript/ql/src/semmle/javascript/security/performance/RegExpTreeView.qll @@ -0,0 +1,14 @@ +/** + * This module should provide a class hierarchy corresponding to a parse tree of regular expressions. + * + * Since the javascript extractor already provides such a hierarchy, we simply import that. + */ + +import javascript + +/** + * Holds if the regular expression should not be considered. + * + * For javascript we make the pragmatic performance optimization to ignore minified files. + */ +predicate isExcluded(RegExpParent parent) { parent.(Expr).getTopLevel().isMinified() } diff --git a/javascript/ql/src/semmle/javascript/security/performance/SuperlinearBackTracking.qll b/javascript/ql/src/semmle/javascript/security/performance/SuperlinearBackTracking.qll index 153093e96645..0bbff12b49d7 100644 --- a/javascript/ql/src/semmle/javascript/security/performance/SuperlinearBackTracking.qll +++ b/javascript/ql/src/semmle/javascript/security/performance/SuperlinearBackTracking.qll @@ -3,7 +3,6 @@ * perform backtracking in superlinear time. */ -import javascript import ReDoSUtil /* From 591b6ef69cf137cac1a197cbf997f4eb6be2e8da Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Mon, 28 Jun 2021 16:56:49 +0200 Subject: [PATCH 07/22] Python: Add ReDoS as identical files from JS The library specific file is `RegExpTreeView`. 
The files are recorded as identical via the mapping in `identical-files.json`. --- config/identical-files.json | 12 + .../CWE-730/PolynomialBackTracking.ql | 6 + .../src/Security/CWE-730/PolynomialReDoS.ql | 33 + python/ql/src/Security/CWE-730/ReDoS.ql | 25 + .../python/regex/ExponentialBackTracking.qll | 342 ++++++ .../ql/src/semmle/python/regex/ReDoSUtil.qll | 1034 +++++++++++++++++ .../semmle/python/regex/RegExpTreeView.qll | 15 + .../python/regex/SuperlinearBackTracking.qll | 449 +++++++ .../security/dataflow/PolynomialReDoS.qll | 177 +++ 9 files changed, 2093 insertions(+) create mode 100644 python/ql/src/Security/CWE-730/PolynomialBackTracking.ql create mode 100644 python/ql/src/Security/CWE-730/PolynomialReDoS.ql create mode 100644 python/ql/src/Security/CWE-730/ReDoS.ql create mode 100644 python/ql/src/semmle/python/regex/ExponentialBackTracking.qll create mode 100644 python/ql/src/semmle/python/regex/ReDoSUtil.qll create mode 100644 python/ql/src/semmle/python/regex/RegExpTreeView.qll create mode 100644 python/ql/src/semmle/python/regex/SuperlinearBackTracking.qll create mode 100644 python/ql/src/semmle/python/security/dataflow/PolynomialReDoS.qll diff --git a/config/identical-files.json b/config/identical-files.json index 582e4c7b6dc3..b3be90b75b22 100644 --- a/config/identical-files.json +++ b/config/identical-files.json @@ -448,5 +448,17 @@ "SensitiveDataHeuristics Python/JS": [ "javascript/ql/src/semmle/javascript/security/internal/SensitiveDataHeuristics.qll", "python/ql/src/semmle/python/security/internal/SensitiveDataHeuristics.qll" + ], + "ReDoS Util Python/JS": [ + "javascript/ql/src/semmle/javascript/security/performance/ReDoSUtil.qll", + "python/ql/src/semmle/python/regex/ReDoSUtil.qll" + ], + "ReDoS Exponential Python/JS": [ + "javascript/ql/src/semmle/javascript/security/performance/ExponentialBackTracking.qll", + "python/ql/src/semmle/python/regex/ExponentialBackTracking.qll" + ], + "ReDoS Polynomial Python/JS": [ + "javascript/ql/src/semmle/javascript/security/performance/SuperlinearBackTracking.qll", + "python/ql/src/semmle/python/regex/SuperlinearBackTracking.qll" ] } diff --git a/python/ql/src/Security/CWE-730/PolynomialBackTracking.ql b/python/ql/src/Security/CWE-730/PolynomialBackTracking.ql new file mode 100644 index 000000000000..a98d4eefa7ec --- /dev/null +++ b/python/ql/src/Security/CWE-730/PolynomialBackTracking.ql @@ -0,0 +1,6 @@ +import python +import semmle.python.regex.SuperlinearBackTracking + +from PolynomialBackTrackingTerm t +where t.getLocation().getFile().getBaseName() = "KnownCVEs.py" +select t.getRegex(), t, t.getReason() diff --git a/python/ql/src/Security/CWE-730/PolynomialReDoS.ql b/python/ql/src/Security/CWE-730/PolynomialReDoS.ql new file mode 100644 index 000000000000..b948c1601a85 --- /dev/null +++ b/python/ql/src/Security/CWE-730/PolynomialReDoS.ql @@ -0,0 +1,33 @@ +/** + * @name Polynomial regular expression used on uncontrolled data + * @description A regular expression that can require polynomial time + * to match may be vulnerable to denial-of-service attacks. 
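For comparison with the exponential case, the `py/polynomial-redos` query introduced here targets superlinear (typically quadratic) backtracking. A self-contained Python illustration of the kind of pattern it is after (a demonstration only; timings are machine-dependent):

```
import re
import time

# `a*a*b`: on a long run of a's with no `b`, every way of splitting the run
# between the two stars is tried, so a failing match is quadratic in the input.
pattern = re.compile(r"a*a*b")

for n in (2000, 4000, 8000):
    start = time.perf_counter()
    assert pattern.match("a" * n) is None
    elapsed = time.perf_counter() - start
    print(f"n={n:5d}  {elapsed:.3f}s")   # doubling n roughly quadruples the time
```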
+ * @kind path-problem + * @problem.severity warning + * @precision high + * @id py/polynomial-redos + * @tags security + * external/cwe/cwe-730 + * external/cwe/cwe-400 + */ + +import python +import semmle.python.regex.SuperlinearBackTracking +import semmle.python.security.dataflow.PolynomialReDoS +import DataFlow::PathGraph + +from + PolynomialReDoSConfiguration config, DataFlow::PathNode source, DataFlow::PathNode sink, + PolynomialReDoSSink sinkNode, PolynomialBackTrackingTerm regexp +where + config.hasFlowPath(source, sink) and + sinkNode = sink.getNode() and + regexp.getRootTerm() = sinkNode.getRegExp() +// not ( +// source.getNode().(Source).getKind() = "url" and +// regexp.isAtEndLine() +// ) +select sinkNode.getHighlight(), source, sink, + "This $@ that depends on $@ may run slow on strings " + regexp.getPrefixMessage() + + "with many repetitions of '" + regexp.getPumpString() + "'.", regexp, "regular expression", + source.getNode(), "a user-provided value" diff --git a/python/ql/src/Security/CWE-730/ReDoS.ql b/python/ql/src/Security/CWE-730/ReDoS.ql new file mode 100644 index 000000000000..aebc2f81cffa --- /dev/null +++ b/python/ql/src/Security/CWE-730/ReDoS.ql @@ -0,0 +1,25 @@ +/** + * @name Inefficient regular expression + * @description A regular expression that requires exponential time to match certain inputs + * can be a performance bottleneck, and may be vulnerable to denial-of-service + * attacks. + * @kind problem + * @problem.severity error + * @precision high + * @id py/redos + * @tags security + * external/cwe/cwe-730 + * external/cwe/cwe-400 + */ + +import python +import semmle.python.regex.ExponentialBackTracking + +from RegExpTerm t, string pump, State s, string prefixMsg +where + hasReDoSResult(t, pump, s, prefixMsg) and + // exclude verbose mode regexes for now + not t.getRegex().getAMode() = "VERBOSE" +select t, + "This part of the regular expression may cause exponential backtracking on strings " + prefixMsg + + "containing many repetitions of '" + pump + "'." diff --git a/python/ql/src/semmle/python/regex/ExponentialBackTracking.qll b/python/ql/src/semmle/python/regex/ExponentialBackTracking.qll new file mode 100644 index 000000000000..61707b1f392f --- /dev/null +++ b/python/ql/src/semmle/python/regex/ExponentialBackTracking.qll @@ -0,0 +1,342 @@ +/** + * This library implements the analysis described in the following two papers: + * + * James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for + * Regular Expression Denial-of-Service Attacks. NSS 2013. + * (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf) + * Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression + * Exponential Runtime via Substructural Logics. 2014. + * (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf) + * + * The basic idea is to search for overlapping cycles in the NFA, that is, + * states `q` such that there are two distinct paths from `q` to itself + * that consume the same word `w`. + * + * For any such state `q`, an attack string can be constructed as follows: + * concatenate a prefix `v` that takes the NFA to `q` with `n` copies of + * the word `w` that leads back to `q` along two different paths, followed + * by a suffix `x` that is _not_ accepted in state `q`. A backtracking + * implementation will need to explore at least 2^n different ways of going + * from `q` back to itself while trying to match the `n` copies of `w` + * before finally giving up. 
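As the paragraph above notes, the blow-up only materialises when the input ends with a suffix that is not accepted: if the suffix lets the match succeed, the engine never has to enumerate the 2^n possibilities. A small Python illustration of that point on the classic `(a*)*b` pattern (demonstration only; timings are machine-dependent):

```
import re
import time

pattern = re.compile(r"(a*)*b")
pump = "a" * 22

def timed(subject):
    start = time.perf_counter()
    matched = pattern.match(subject) is not None
    return matched, time.perf_counter() - start

# Accepting suffix: the first attempt succeeds, so no backtracking is needed.
print(timed(pump + "b"))   # (True, a few microseconds)

# Rejecting suffix: every split of the pump must be tried before giving up.
print(timed(pump + "!"))   # (False, noticeably slow)
```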
+ * + * Now in order to identify overlapping cycles, all we have to do is find + * pumpable forks, that is, states `q` that can transition to two different + * states `r1` and `r2` on the same input symbol `c`, such that there are + * paths from both `r1` and `r2` to `q` that consume the same word. The latter + * condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)` + * in the product NFA. + * + * This is what the library does. It makes a simple attempt to construct a + * prefix `v` leading into `q`, but only to improve the alert message. + * And the library tries to prove the existence of a suffix that ensures + * rejection. This check might fail, which can cause false positives. + * + * Finally, sometimes it depends on the translation whether the NFA generated + * for a regular expression has a pumpable fork or not. We implement one + * particular translation, which may result in false positives or negatives + * relative to some particular JavaScript engine. + * + * More precisely, the library constructs an NFA from a regular expression `r` + * as follows: + * + * * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing + * the state of the automaton before attempting to match the `i`th character in `t`. + * * There is one accepting state `Accept(r)`. + * * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string + * by using an epsilon transition to `Accept(r)` and an any transition to itself. + * * Transitions between states may be labelled with epsilon, or an abstract + * input symbol. + * * Each abstract input symbol represents a set of concrete input characters: + * either a single character, a set of characters represented by a + * character class, or the set of all characters. + * * The product automaton is constructed lazily, starting with pair states + * `(q, q)` where `q` is a fork, and proceding along an over-approximate + * step relation. + * * The over-approximate step relation allows transitions along pairs of + * abstract input symbols where the symbols have overlap in the characters they accept. + * * Once a trace of pairs of abstract input symbols that leads from a fork + * back to itself has been identified, we attempt to construct a concrete + * string corresponding to it, which may fail. + * * Lastly we ensure that any state reached by repeating `n` copies of `w` has + * a suffix `x` (possible empty) that is most likely __not__ accepted. + */ + +import ReDoSUtil + +/** + * Holds if state `s` might be inside a backtracking repetition. + */ +pragma[noinline] +predicate stateInsideBacktracking(State s) { + s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition +} + +/** + * A infinitely repeating quantifier that might backtrack. + */ +class MaybeBacktrackingRepetition extends InfiniteRepetitionQuantifier { + MaybeBacktrackingRepetition() { + exists(RegExpTerm child | + child instanceof RegExpAlt or + child instanceof RegExpQuantifier + | + child.getParent+() = this + ) + } +} + +/** + * A state in the product automaton. + */ +newtype TStatePair = + /** + * We lazily only construct those states that we are actually + * going to need: `(q, q)` for every fork state `q`, and any + * pair of states that can be reached from a pair that we have + * already constructed. To cut down on the number of states, + * we only represent states `(q1, q2)` where `q1` is lexicographically + * no bigger than `q2`. 
+ * + * States are only constructed if both states in the pair are + * inside a repetition that might backtrack. + */ + MkStatePair(State q1, State q2) { + isFork(q1, _, _, _, _) and q2 = q1 + or + (step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and + rankState(q1) <= rankState(q2) + } + +/** + * Gets a unique number for a `state`. + * Is used to create an ordering of states, where states with the same `toString()` will be ordered differently. + */ +int rankState(State state) { + state = + rank[result](State s, Location l | + l = s.getRepr().getLocation() + | + s order by l.getStartLine(), l.getStartColumn(), s.toString() + ) +} + +/** + * A state in the product automaton. + */ +class StatePair extends TStatePair { + State q1; + State q2; + + StatePair() { this = MkStatePair(q1, q2) } + + /** Gets a textual representation of this element. */ + string toString() { result = "(" + q1 + ", " + q2 + ")" } + + /** Gets the first component of the state pair. */ + State getLeft() { result = q1 } + + /** Gets the second component of the state pair. */ + State getRight() { result = q2 } +} + +/** + * Holds for all constructed state pairs. + * + * Used in `statePairDist` + */ +predicate isStatePair(StatePair p) { any() } + +/** + * Holds if there are transitions from the components of `q` to the corresponding + * components of `r`. + * + * Used in `statePairDist` + */ +predicate delta2(StatePair q, StatePair r) { step(q, _, _, r) } + +/** + * Gets the minimum length of a path from `q` to `r` in the + * product automaton. + */ +int statePairDist(StatePair q, StatePair r) = + shortestDistances(isStatePair/1, delta2/2)(q, r, result) + +/** + * Holds if there are transitions from `q` to `r1` and from `q` to `r2` + * labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not + * trivially have an empty intersection. + * + * This predicate only holds for states associated with regular expressions + * that have at least one repetition quantifier in them (otherwise the + * expression cannot be vulnerable to ReDoS attacks anyway). + */ +pragma[noopt] +predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) { + stateInsideBacktracking(q) and + exists(State q1, State q2 | + q1 = epsilonSucc*(q) and + delta(q1, s1, r1) and + q2 = epsilonSucc*(q) and + delta(q2, s2, r2) and + // Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join. + // From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals, + // and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions. + exists(intersect(s1, s2)) + | + s1 != s2 + or + r1 != r2 + or + r1 = r2 and q1 != q2 + or + // If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state: + // one that uses the loop and one that doesn't. The engine will separately attempt to match with each path, + // despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not. + // To avoid every state in the loop becoming a fork state, + // we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop + // (every epsilon-loop must contain such a state). + // + // We additionally require that the there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself. + // This is done to avoid flagging regular expressions such as `/(a?)*b/` - that only has polynomial runtime, and is detected by `js/polynomial-redos`. 
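The comment here distinguishes `/(a?)*b/`, which only backtracks polynomially and is left to the polynomial-ReDoS query, from patterns like `/(a*)*b/` that this heuristic does flag. A quick, purely illustrative Python measurement of the difference (exact numbers will vary; the point is the orders-of-magnitude gap):

```
import re
import time

def time_match(pattern, subject):
    start = time.perf_counter()
    re.match(pattern, subject)
    return time.perf_counter() - start

subject = "a" * 22   # no trailing 'b', so both matches fail

# `(a?)*b`: each loop iteration can consume at most one character, so the
# failing match is found after a modest amount of backtracking.
print("(a?)*b :", time_match(r"(a?)*b", subject))

# `(a*)*b`: every split of the a's between the inner and outer star is tried,
# so the failing match takes exponentially long in the length of the input.
print("(a*)*b :", time_match(r"(a*)*b", subject))
```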
+ // The below code is therefore a heuritic, that only flags regular expressions such as `/(a*)*b/`, + // and does not flag regular expressions such as `/(a?b?)c/`, but the latter pattern is not used frequently. + r1 = r2 and + q1 = q2 and + epsilonSucc+(q) = q and + exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and + // One of the mid states is an infinite quantifier itself + exists(State mid, RegExpTerm term | + mid = epsilonSucc+(q) and + term = mid.getRepr() and + term instanceof InfiniteRepetitionQuantifier and + q = epsilonSucc+(mid) and + not mid = q + ) + ) and + stateInsideBacktracking(r1) and + stateInsideBacktracking(r2) +} + +/** + * Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only + * one or the other is defined. + */ +StatePair mkStatePair(State q1, State q2) { + result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1) +} + +/** + * Holds if there are transitions from the components of `q` to the corresponding + * components of `r` labelled with `s1` and `s2`, respectively. + */ +predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) { + exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2)) +} + +/** + * Holds if there are transitions from the components of `q` to `r1` and `r2` + * labelled with `s1` and `s2`, respectively. + * + * We only consider transitions where the resulting states `(r1, r2)` are both + * inside a repetition that might backtrack. + */ +pragma[noopt] +predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) { + exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 | + deltaClosed(q1, s1, r1) and + deltaClosed(q2, s2, r2) and + // use noopt to force the join on `intersect` to happen last. + exists(intersect(s1, s2)) + ) and + stateInsideBacktracking(r1) and + stateInsideBacktracking(r2) +} + +private newtype TTrace = + Nil() or + Step(InputSymbol s1, InputSymbol s2, TTrace t) { + exists(StatePair p | + isReachableFromFork(_, p, t, _) and + step(p, s1, s2, _) + ) + or + t = Nil() and isFork(_, s1, s2, _, _) + } + +/** + * A list of pairs of input symbols that describe a path in the product automaton + * starting from some fork state. + */ +class Trace extends TTrace { + /** Gets a textual representation of this element. */ + string toString() { + this = Nil() and result = "Nil()" + or + exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) | + result = "Step(" + s1 + ", " + s2 + ", " + t + ")" + ) + } +} + +/** + * Gets a string corresponding to the trace `t`. + */ +string concretise(Trace t) { + t = Nil() and result = "" + or + exists(InputSymbol s1, InputSymbol s2, Trace rest | t = Step(s1, s2, rest) | + result = concretise(rest) + intersect(s1, s2) + ) +} + +/** + * Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is + * a path from `r` back to `(fork, fork)` with `rem` steps. 
+ */ +predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) { + // base case + exists(InputSymbol s1, InputSymbol s2, State q1, State q2 | + isFork(fork, s1, s2, q1, q2) and + r = MkStatePair(q1, q2) and + w = Step(s1, s2, Nil()) and + rem = statePairDist(r, MkStatePair(fork, fork)) + ) + or + // recursive case + exists(StatePair p, Trace v, InputSymbol s1, InputSymbol s2 | + isReachableFromFork(fork, p, v, rem + 1) and + step(p, s1, s2, r) and + w = Step(s1, s2, v) and + rem >= statePairDist(r, MkStatePair(fork, fork)) + ) +} + +/** + * Gets a state in the product automaton from which `(fork, fork)` is + * reachable in zero or more epsilon transitions. + */ +StatePair getAForkPair(State fork) { + isFork(fork, _, _, _, _) and + result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork)) +} + +/** + * Holds if `fork` is a pumpable fork with word `w`. + */ +predicate isPumpable(State fork, string w) { + exists(StatePair q, Trace t | + isReachableFromFork(fork, q, t, _) and + q = getAForkPair(fork) and + w = concretise(t) + ) +} + +/** + * An instantiation of `ReDoSConfiguration` for exponential backtracking. + */ +class ExponentialReDoSConfiguration extends ReDoSConfiguration { + ExponentialReDoSConfiguration() { this = "ExponentialReDoSConfiguration" } + + override predicate isReDoSCandidate(State state, string pump) { isPumpable(state, pump) } +} diff --git a/python/ql/src/semmle/python/regex/ReDoSUtil.qll b/python/ql/src/semmle/python/regex/ReDoSUtil.qll new file mode 100644 index 000000000000..40b05825bc6d --- /dev/null +++ b/python/ql/src/semmle/python/regex/ReDoSUtil.qll @@ -0,0 +1,1034 @@ +/** + * Provides classes for working with regular expressions that can + * perform backtracking in superlinear/exponential time. + * + * This module contains a number of utility predicates for compiling a regular expression into a NFA and reasoning about this NFA. + * + * The `ReDoSConfiguration` contains a `isReDoSCandidate` predicate that is used to + * to determine which states the prefix/suffix search should happen on. + * There is only meant to exist one `ReDoSConfiguration` at a time. + * + * The predicate `hasReDoSResult` outputs a de-duplicated set of + * states that will cause backtracking (a rejecting suffix exists). + */ + +import RegExpTreeView + +/** + * A configuration for which parts of a regular expression should be considered relevant for + * the different predicates in `ReDoS.qll`. + * Used to adjust the computations for either superlinear or exponential backtracking. + */ +abstract class ReDoSConfiguration extends string { + bindingset[this] + ReDoSConfiguration() { any() } + + /** + * Holds if `state` with the pump string `pump` is a candidate for a + * ReDoS vulnerable state. + * This is used to determine which states are considered for the prefix/suffix construction. + */ + abstract predicate isReDoSCandidate(State state, string pump); +} + +/** + * Holds if repeating `pump' starting at `state` is a candidate for causing backtracking. + * No check whether a rejected suffix exists has been made. 
+ */ +private predicate isReDoSCandidate(State state, string pump) { + any(ReDoSConfiguration conf).isReDoSCandidate(state, pump) and + ( + not any(ReDoSConfiguration conf).isReDoSCandidate(epsilonSucc+(state), _) + or + epsilonSucc+(state) = state and + state = + max(State s, Location l | + s = epsilonSucc+(state) and + l = s.getRepr().getLocation() and + any(ReDoSConfiguration conf).isReDoSCandidate(s, _) and + s.getRepr() instanceof InfiniteRepetitionQuantifier + | + s order by l.getStartLine(), l.getStartColumn(), l.getEndColumn(), l.getEndLine() + ) + ) +} + +/** + * Gets the char after `c` (from a simplified ASCII table). + */ +private string nextChar(string c) { exists(int code | code = ascii(c) | code + 1 = ascii(result)) } + +/** + * Gets an approximation for the ASCII code for `char`. + * Only the easily printable chars are included (so no newline, tab, null, etc). + */ +private int ascii(string char) { + char = + rank[result](string c | + c = + "! \"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~" + .charAt(_) + ) +} + +/** + * A branch in a disjunction that is the root node in a literal, or a literal + * whose root node is not a disjunction. + */ +class RegExpRoot extends RegExpTerm { + RegExpParent parent; + + RegExpRoot() { + exists(RegExpAlt alt | + alt.isRootTerm() and + this = alt.getAChild() and + parent = alt.getParent() + ) + or + this.isRootTerm() and + not this instanceof RegExpAlt and + parent = this.getParent() + } + + /** + * Holds if this root term is relevant to the ReDoS analysis. + */ + predicate isRelevant() { + // there is at least one repetition + getRoot(any(InfiniteRepetitionQuantifier q)) = this and + // there are no lookbehinds + not exists(RegExpLookbehind lbh | getRoot(lbh) = this) and + // is actually used as a RegExp + isUsedAsRegExp() and + // not excluded for library specific reasons + not isExcluded(getRootTerm().getParent()) + } +} + +/** + * A constant in a regular expression that represents valid Unicode character(s). + */ +private class RegexpCharacterConstant extends RegExpConstant { + RegexpCharacterConstant() { this.isCharacter() } +} + +/** + * Holds if `term` is the chosen canonical representative for all terms with string representation `str`. + * + * Using canonical representatives gives a huge performance boost when working with tuples containing multiple `InputSymbol`s. + * The number of `InputSymbol`s is decreased by 3 orders of magnitude or more in some larger benchmarks. + */ +private predicate isCanonicalTerm(RegExpTerm term, string str) { + term = + rank[1](RegExpTerm t, Location loc, File file | + loc = t.getLocation() and + file = t.getFile() and + str = t.getRawValue() + | + t order by t.getFile().getRelativePath(), loc.getStartLine(), loc.getStartColumn() + ) +} + +/** + * An abstract input symbol, representing a set of concrete characters. + */ +private newtype TInputSymbol = + /** An input symbol corresponding to character `c`. */ + Char(string c) { + c = any(RegexpCharacterConstant cc | getRoot(cc).isRelevant()).getValue().charAt(_) + } or + /** + * An input symbol representing all characters matched by + * a (non-universal) character class that has string representation `charClassString`. 
+ */ + CharClass(string charClassString) { + exists(RegExpTerm term | term.getRawValue() = charClassString | getRoot(term).isRelevant()) and + exists(RegExpTerm recc | isCanonicalTerm(recc, charClassString) | + recc instanceof RegExpCharacterClass and + not recc.(RegExpCharacterClass).isUniversalClass() + or + recc instanceof RegExpCharacterClassEscape + ) + } or + /** An input symbol representing all characters matched by `.`. */ + Dot() or + /** An input symbol representing all characters. */ + Any() or + /** An epsilon transition in the automaton. */ + Epsilon() + +/** + * Gets the canonical CharClass for `term`. + */ +CharClass getCanonicalCharClass(RegExpTerm term) { + exists(string str | isCanonicalTerm(term, str) | result = CharClass(str)) +} + +/** + * Holds if `a` and `b` are input symbols from the same regexp. + */ +private predicate sharesRoot(TInputSymbol a, TInputSymbol b) { + exists(RegExpRoot root | + belongsTo(a, root) and + belongsTo(b, root) + ) +} + +/** + * Holds if the `a` is an input symbol from a regexp that has root `root`. + */ +private predicate belongsTo(TInputSymbol a, RegExpRoot root) { + exists(State s | getRoot(s.getRepr()) = root | + delta(s, a, _) + or + delta(_, a, s) + ) +} + +/** + * An abstract input symbol, representing a set of concrete characters. + */ +class InputSymbol extends TInputSymbol { + InputSymbol() { not this instanceof Epsilon } + + /** + * Gets a string representation of this input symbol. + */ + string toString() { + this = Char(result) + or + this = CharClass(result) + or + this = Dot() and result = "." + or + this = Any() and result = "[^]" + } +} + +/** + * An abstract input symbol that represents a character class. + */ +abstract private class CharacterClass extends InputSymbol { + /** + * Gets a character that is relevant for intersection-tests involving this + * character class. + * + * Specifically, this is any of the characters mentioned explicitly in the + * character class, offset by one if it is inverted. For character class escapes, + * the result is as if the class had been written out as a series of intervals. + * + * This set is large enough to ensure that for any two intersecting character + * classes, one contains a relevant character from the other. + */ + abstract string getARelevantChar(); + + /** + * Holds if this character class matches `char`. + */ + bindingset[char] + abstract predicate matches(string char); + + /** + * Gets a character matched by this character class. + */ + string choose() { result = getARelevantChar() and matches(result) } +} + +/** + * Provides implementations for `CharacterClass`. + */ +private module CharacterClasses { + /** + * Holds if the character class `cc` has a child (constant or range) that matches `char`. 
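The `getARelevantChar` contract above is what keeps the intersection tests finite: rather than comparing full character sets, it suffices to test a handful of "relevant" characters of each class (the explicitly mentioned constants and range endpoints) against the other class. A small Python check of that invariant for simple ranges (an illustration of the property, not the QL code):

```
import string

def char_range(lo, hi):
    """The full set of printable characters matched by a class like `[lo-hi]`."""
    return {c for c in string.printable if lo <= c <= hi}

def relevant_chars(lo, hi):
    """The 'relevant' characters of `[lo-hi]`: just its endpoints."""
    return {lo, hi}

def intersects_via_relevant(a, b):
    """Detect overlap by testing each class's relevant chars against the other."""
    (alo, ahi), (blo, bhi) = a, b
    return any(blo <= c <= bhi for c in relevant_chars(alo, ahi)) or \
           any(alo <= c <= ahi for c in relevant_chars(blo, bhi))

# Overlapping and disjoint ranges agree with the full-set computation:
for a, b in [(("a", "f"), ("d", "z")), (("a", "c"), ("x", "z")), (("a", "z"), ("m", "n"))]:
    full = bool(char_range(*a) & char_range(*b))
    assert intersects_via_relevant(a, b) == full
print("endpoint test agrees with the full set computation")
```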
+ */ + pragma[noinline] + predicate hasChildThatMatches(RegExpCharacterClass cc, string char) { + exists(getCanonicalCharClass(cc)) and + exists(RegExpTerm child | child = cc.getAChild() | + char = child.(RegexpCharacterConstant).getValue() + or + rangeMatchesOnLetterOrDigits(child, char) + or + not rangeMatchesOnLetterOrDigits(child, _) and + char = getARelevantChar() and + exists(string lo, string hi | child.(RegExpCharacterRange).isRange(lo, hi) | + lo <= char and + char <= hi + ) + or + exists(RegExpCharacterClassEscape escape | escape = child | + escape.getValue() = escape.getValue().toLowerCase() and + classEscapeMatches(escape.getValue(), char) + or + char = getARelevantChar() and + escape.getValue() = escape.getValue().toUpperCase() and + not classEscapeMatches(escape.getValue().toLowerCase(), char) + ) + ) + } + + /** + * Holds if `range` is a range on lower-case, upper-case, or digits, and matches `char`. + * This predicate is used to restrict the searchspace for ranges by only joining `getAnyPossiblyMatchedChar` + * on a few ranges. + */ + private predicate rangeMatchesOnLetterOrDigits(RegExpCharacterRange range, string char) { + exists(string lo, string hi | + range.isRange(lo, hi) and lo = lowercaseLetter() and hi = lowercaseLetter() + | + lo <= char and + char <= hi and + char = lowercaseLetter() + ) + or + exists(string lo, string hi | + range.isRange(lo, hi) and lo = upperCaseLetter() and hi = upperCaseLetter() + | + lo <= char and + char <= hi and + char = upperCaseLetter() + ) + or + exists(string lo, string hi | range.isRange(lo, hi) and lo = digit() and hi = digit() | + lo <= char and + char <= hi and + char = digit() + ) + } + + private string lowercaseLetter() { result = "abdcefghijklmnopqrstuvwxyz".charAt(_) } + + private string upperCaseLetter() { result = "ABCDEFGHIJKLMNOPQRSTUVWXYZ".charAt(_) } + + private string digit() { result = [0 .. 9].toString() } + + /** + * Gets a char that could be matched by a regular expression. + * Includes all printable ascii chars, all constants mentioned in a regexp, and all chars matches by the regexp `/\s|\d|\w/`. + */ + string getARelevantChar() { + exists(ascii(result)) + or + exists(RegexpCharacterConstant c | result = c.getValue().charAt(_)) + or + classEscapeMatches(_, result) + } + + /** + * Gets a char that is mentioned in the character class `c`. + */ + private string getAMentionedChar(RegExpCharacterClass c) { + exists(RegExpTerm child | child = c.getAChild() | + result = child.(RegexpCharacterConstant).getValue() + or + child.(RegExpCharacterRange).isRange(result, _) + or + child.(RegExpCharacterRange).isRange(_, result) + or + exists(RegExpCharacterClassEscape escape | child = escape | + result = min(string s | classEscapeMatches(escape.getValue().toLowerCase(), s)) + or + result = max(string s | classEscapeMatches(escape.getValue().toLowerCase(), s)) + ) + ) + } + + /** + * An implementation of `CharacterClass` for positive (non inverted) character classes. + */ + private class PositiveCharacterClass extends CharacterClass { + RegExpCharacterClass cc; + + PositiveCharacterClass() { this = getCanonicalCharClass(cc) and not cc.isInverted() } + + override string getARelevantChar() { result = getAMentionedChar(cc) } + + override predicate matches(string char) { hasChildThatMatches(cc, char) } + } + + /** + * An implementation of `CharacterClass` for inverted character classes. 
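For an inverted class, the implementation that follows takes its relevant characters to be the mentioned characters shifted by one code point in either direction (via `nextChar`), i.e. characters sitting just outside the excluded ranges. The effect is easy to see with Python's `re` (illustration only):

```
import re

# For `[^b-y]`, the characters worth testing are one code point outside the range.
mentioned = {"b", "y"}
shifted = {chr(ord(c) + 1) for c in mentioned} | {chr(ord(c) - 1) for c in mentioned}

pattern = re.compile(r"[^b-y]")
for c in sorted(shifted):
    print(repr(c), "matches" if pattern.fullmatch(c) else "does not match")
# 'a' and 'z' match (just outside the excluded range); 'c' and 'x' do not.
```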
+ */ + private class InvertedCharacterClass extends CharacterClass { + RegExpCharacterClass cc; + + InvertedCharacterClass() { this = getCanonicalCharClass(cc) and cc.isInverted() } + + override string getARelevantChar() { + result = nextChar(getAMentionedChar(cc)) or + nextChar(result) = getAMentionedChar(cc) + } + + bindingset[char] + override predicate matches(string char) { not hasChildThatMatches(cc, char) } + } + + /** + * Holds if the character class escape `clazz` (\d, \s, or \w) matches `char`. + */ + pragma[noinline] + private predicate classEscapeMatches(string clazz, string char) { + clazz = "d" and + char = "0123456789".charAt(_) + or + clazz = "s" and + ( + char = [" ", "\t", "\r", "\n"] + or + char = getARelevantChar() and + char.regexpMatch("\\u000b|\\u000c") // \v|\f (vertical tab | form feed) + ) + or + clazz = "w" and + char = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_".charAt(_) + } + + /** + * An implementation of `CharacterClass` for \d, \s, and \w. + */ + private class PositiveCharacterClassEscape extends CharacterClass { + RegExpCharacterClassEscape cc; + + PositiveCharacterClassEscape() { + this = getCanonicalCharClass(cc) and cc.getValue() = ["d", "s", "w"] + } + + override string getARelevantChar() { + cc.getValue() = "d" and + result = ["0", "9"] + or + cc.getValue() = "s" and + result = [" "] + or + cc.getValue() = "w" and + result = ["a", "Z", "_", "0", "9"] + } + + override predicate matches(string char) { classEscapeMatches(cc.getValue(), char) } + + override string choose() { + cc.getValue() = "d" and + result = "9" + or + cc.getValue() = "s" and + result = [" "] + or + cc.getValue() = "w" and + result = "a" + } + } + + /** + * An implementation of `CharacterClass` for \D, \S, and \W. + */ + private class NegativeCharacterClassEscape extends CharacterClass { + RegExpCharacterClassEscape cc; + + NegativeCharacterClassEscape() { + this = getCanonicalCharClass(cc) and cc.getValue() = ["D", "S", "W"] + } + + override string getARelevantChar() { + cc.getValue() = "D" and + result = ["a", "Z", "!"] + or + cc.getValue() = "S" and + result = ["a", "9", "!"] + or + cc.getValue() = "W" and + result = [" ", "!"] + } + + bindingset[char] + override predicate matches(string char) { + not classEscapeMatches(cc.getValue().toLowerCase(), char) + } + } +} + +private class EdgeLabel extends TInputSymbol { + string toString() { + this = Epsilon() and result = "" + or + exists(InputSymbol s | this = s and result = s.toString()) + } +} + +/** + * Gets the state before matching `t`. + */ +pragma[inline] +private State before(RegExpTerm t) { result = Match(t, 0) } + +/** + * Gets a state the NFA may be in after matching `t`. + */ +private State after(RegExpTerm t) { + exists(RegExpAlt alt | t = alt.getAChild() | result = after(alt)) + or + exists(RegExpSequence seq, int i | t = seq.getChild(i) | + result = before(seq.getChild(i + 1)) + or + i + 1 = seq.getNumChild() and result = after(seq) + ) + or + exists(RegExpGroup grp | t = grp.getAChild() | result = after(grp)) + or + exists(RegExpStar star | t = star.getAChild() | result = before(star)) + or + exists(RegExpPlus plus | t = plus.getAChild() | + result = before(plus) or + result = after(plus) + ) + or + exists(RegExpOpt opt | t = opt.getAChild() | result = after(opt)) + or + exists(RegExpRoot root | t = root | result = AcceptAnySuffix(root)) +} + +/** + * Holds if the NFA has a transition from `q1` to `q2` labelled with `lbl`. 
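+ *
+ * For example, for the constant `abc` there is a transition from `Match(abc, 0)`
+ * to `Match(abc, 1)` labelled with the input symbol `Char("a")`.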
+ */ +predicate delta(State q1, EdgeLabel lbl, State q2) { + exists(RegexpCharacterConstant s, int i | + q1 = Match(s, i) and + lbl = Char(s.getValue().charAt(i)) and + ( + q2 = Match(s, i + 1) + or + s.getValue().length() = i + 1 and + q2 = after(s) + ) + ) + or + exists(RegExpDot dot | q1 = before(dot) and q2 = after(dot) | + if dot.getLiteral().isDotAll() then lbl = Any() else lbl = Dot() + ) + or + exists(RegExpCharacterClass cc | + cc.isUniversalClass() and q1 = before(cc) and lbl = Any() and q2 = after(cc) + or + q1 = before(cc) and + lbl = CharClass(cc.getRawValue()) and + q2 = after(cc) + ) + or + exists(RegExpCharacterClassEscape cc | + q1 = before(cc) and + lbl = CharClass(cc.getRawValue()) and + q2 = after(cc) + ) + or + exists(RegExpAlt alt | lbl = Epsilon() | q1 = before(alt) and q2 = before(alt.getAChild())) + or + exists(RegExpSequence seq | lbl = Epsilon() | q1 = before(seq) and q2 = before(seq.getChild(0))) + or + exists(RegExpGroup grp | lbl = Epsilon() | q1 = before(grp) and q2 = before(grp.getChild(0))) + or + exists(RegExpStar star | lbl = Epsilon() | + q1 = before(star) and q2 = before(star.getChild(0)) + or + q1 = before(star) and q2 = after(star) + ) + or + exists(RegExpPlus plus | lbl = Epsilon() | q1 = before(plus) and q2 = before(plus.getChild(0))) + or + exists(RegExpOpt opt | lbl = Epsilon() | + q1 = before(opt) and q2 = before(opt.getChild(0)) + or + q1 = before(opt) and q2 = after(opt) + ) + or + exists(RegExpRoot root | q1 = AcceptAnySuffix(root) | + lbl = Any() and q2 = q1 + or + lbl = Epsilon() and q2 = Accept(root) + ) + or + exists(RegExpRoot root | q1 = Match(root, 0) | lbl = Any() and q2 = q1) + or + exists(RegExpDollar dollar | q1 = before(dollar) | + lbl = Epsilon() and q2 = Accept(getRoot(dollar)) + ) +} + +/** + * Gets a state that `q` has an epsilon transition to. + */ +State epsilonSucc(State q) { delta(q, Epsilon(), result) } + +/** + * Gets a state that has an epsilon transition to `q`. + */ +State epsilonPred(State q) { q = epsilonSucc(result) } + +/** + * Holds if there is a state `q` that can be reached from `q1` + * along epsilon edges, such that there is a transition from + * `q` to `q2` that consumes symbol `s`. + */ +predicate deltaClosed(State q1, InputSymbol s, State q2) { delta(epsilonSucc*(q1), s, q2) } + +/** + * Gets the root containing the given term, that is, the root of the literal, + * or a branch of the root disjunction. + */ +RegExpRoot getRoot(RegExpTerm term) { + result = term or + result = getRoot(term.getParent()) +} + +private newtype TState = + Match(RegExpTerm t, int i) { + getRoot(t).isRelevant() and + ( + i = 0 + or + exists(t.(RegexpCharacterConstant).getValue().charAt(i)) + ) + } or + Accept(RegExpRoot l) { l.isRelevant() } or + AcceptAnySuffix(RegExpRoot l) { l.isRelevant() } + +/** + * Gets a state that is about to match the regular expression `t`. + */ +State mkMatch(RegExpTerm t) { result = Match(t, 0) } + +/** + * A state in the NFA corresponding to a regular expression. + * + * Each regular expression literal `l` has one accepting state + * `Accept(l)`, one state that accepts all suffixes `AcceptAnySuffix(l)`, + * and a state `Match(t, i)` for every subterm `t`, + * which represents the state of the NFA before starting to + * match `t`, or the `i`th character in `t` if `t` is a constant. 
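+ *
+ * For example, for the constant `abc` there are states `Match(abc, 0)`,
+ * `Match(abc, 1)`, and `Match(abc, 2)`, representing that the first 0, 1, or 2
+ * characters of the constant have been matched.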
+ */ +class State extends TState { + RegExpTerm repr; + + State() { + this = Match(repr, _) or + this = Accept(repr) or + this = AcceptAnySuffix(repr) + } + + /** + * Gets a string representation for this state in a regular expression. + */ + string toString() { + exists(int i | this = Match(repr, i) | result = "Match(" + repr + "," + i + ")") + or + this instanceof Accept and + result = "Accept(" + repr + ")" + or + this instanceof AcceptAnySuffix and + result = "AcceptAny(" + repr + ")" + } + + /** + * Gets the location for this state. + */ + Location getLocation() { result = repr.getLocation() } + + /** + * Gets the term represented by this state. + */ + RegExpTerm getRepr() { result = repr } +} + +/** + * Gets the minimum char that is matched by both the character classes `c` and `d`. + */ +private string getMinOverlapBetweenCharacterClasses(CharacterClass c, CharacterClass d) { + result = min(getAOverlapBetweenCharacterClasses(c, d)) +} + +/** + * Gets a char that is matched by both the character classes `c` and `d`. + * And `c` and `d` is not the same character class. + */ +private string getAOverlapBetweenCharacterClasses(CharacterClass c, CharacterClass d) { + sharesRoot(c, d) and + result = [c.getARelevantChar(), d.getARelevantChar()] and + c.matches(result) and + d.matches(result) and + not c = d +} + +/** + * Gets a character that is represented by both `c` and `d`. + */ +string intersect(InputSymbol c, InputSymbol d) { + (sharesRoot(c, d) or [c, d] = Any()) and + ( + c = Char(result) and + d = getAnInputSymbolMatching(result) + or + result = getMinOverlapBetweenCharacterClasses(c, d) + or + result = c.(CharacterClass).choose() and + ( + d = c + or + d = Dot() and + not (result = "\n" or result = "\r") + or + d = Any() + ) + or + (c = Dot() or c = Any()) and + (d = Dot() or d = Any()) and + result = "a" + ) + or + result = intersect(d, c) +} + +/** + * Gets a symbol that matches `char`. + */ +bindingset[char] +InputSymbol getAnInputSymbolMatching(string char) { + result = Char(char) + or + result.(CharacterClass).matches(char) + or + result = Dot() and + not (char = "\n" or char = "\r") + or + result = Any() +} + +/** + * Predicates for constructing a prefix string that leads to a given state. + */ +private module PrefixConstruction { + /** + * Holds if `state` starts the string matched by the regular expression. + */ + private predicate isStartState(State state) { + state instanceof StateInPumpableRegexp and + ( + state = Match(any(RegExpRoot r), _) + or + exists(RegExpCaret car | state = after(car)) + ) + } + + /** + * Holds if `state` is the textually last start state for the regular expression. + */ + private predicate lastStartState(State state) { + exists(RegExpRoot root | + state = + max(State s, Location l | + isStartState(s) and getRoot(s.getRepr()) = root and l = s.getRepr().getLocation() + | + s + order by + l.getStartLine(), l.getStartColumn(), s.getRepr().toString(), l.getEndColumn(), + l.getEndLine() + ) + ) + } + + /** + * Holds if there exists any transition (Epsilon() or other) from `a` to `b`. + */ + private predicate existsTransition(State a, State b) { delta(a, _, b) } + + /** + * Gets the minimum number of transitions it takes to reach `state` from the `start` state. + */ + int prefixLength(State start, State state) = + shortestDistances(lastStartState/1, existsTransition/2)(start, state, result) + + /** + * Gets the minimum number of transitions it takes to reach `state` from the start state. 
+ */ + private int lengthFromStart(State state) { result = prefixLength(_, state) } + + /** + * Gets a string for which the regular expression will reach `state`. + * + * Has at most one result for any given `state`. + * This predicate will not always have a result even if there is a ReDoS issue in + * the regular expression. + */ + string prefix(State state) { + lastStartState(state) and + result = "" + or + // the search stops past the last redos candidate state. + lengthFromStart(state) <= max(lengthFromStart(any(State s | isReDoSCandidate(s, _)))) and + exists(State prev | + // select a unique predecessor (by an arbitrary measure) + prev = + min(State s, Location loc | + lengthFromStart(s) = lengthFromStart(state) - 1 and + loc = s.getRepr().getLocation() and + delta(s, _, state) + | + s + order by + loc.getStartLine(), loc.getStartColumn(), loc.getEndLine(), loc.getEndColumn(), + s.getRepr().toString() + ) + | + // greedy search for the shortest prefix + result = prefix(prev) and delta(prev, Epsilon(), state) + or + not delta(prev, Epsilon(), state) and + result = prefix(prev) + getCanonicalEdgeChar(prev, state) + ) + } + + /** + * Gets a canonical char for which there exists a transition from `prev` to `next` in the NFA. + */ + private string getCanonicalEdgeChar(State prev, State next) { + result = + min(string c | delta(prev, any(InputSymbol symbol | c = intersect(Any(), symbol)), next)) + } + + /** + * A state within a regular expression that has a pumpable state. + */ + class StateInPumpableRegexp extends State { + pragma[noinline] + StateInPumpableRegexp() { + exists(State s | isReDoSCandidate(s, _) | getRoot(s.getRepr()) = getRoot(this.getRepr())) + } + } +} + +/** + * Predicates for testing the presence of a rejecting suffix. + * + * These predicates are used to ensure that the all states reached from the fork + * by repeating `w` have a rejecting suffix. + * + * For example, a regexp like `/^(a+)+/` will accept any string as long the prefix is + * some number of `"a"`s, and it is therefore not possible to construct a rejecting suffix. + * + * A regexp like `/(a+)+$/` or `/(a+)+b/` trivially has a rejecting suffix, + * as the suffix "X" will cause both the regular expressions to be rejected. + * + * The string `w` is repeated any number of times because it needs to be + * infinitely repeatedable for the attack to work. + * For the regular expression `/((ab)+)*abab/` the accepting state is not reachable from the fork + * using epsilon transitions. But any attempt at repeating `w` will end in a state that accepts all suffixes. + */ +private module SuffixConstruction { + import PrefixConstruction + + /** + * Holds if all states reachable from `fork` by repeating `w` + * are likely rejectable by appending some suffix. + */ + predicate reachesOnlyRejectableSuffixes(State fork, string w) { + isReDoSCandidate(fork, w) and + forex(State next | next = process(fork, w, w.length() - 1) | isLikelyRejectable(next)) + } + + /** + * Holds if there likely exists a suffix starting from `s` that leads to the regular expression being rejected. + * This predicate might find impossible suffixes when searching for suffixes of length > 1, which can cause FPs. + */ + pragma[noinline] + private predicate isLikelyRejectable(StateInPumpableRegexp s) { + // exists a reject edge with some char. 
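+ // (e.g. the state before `c` in /^abc$/ has no outgoing edge labelled "X")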
+ hasRejectEdge(s) + or + hasEdgeToLikelyRejectable(s) + or + // stopping here is rejection + isRejectState(s) + } + + /** + * Holds if `s` is not an accept state, and there is no epsilon transition to an accept state. + */ + predicate isRejectState(StateInPumpableRegexp s) { not epsilonSucc*(s) = Accept(_) } + + /** + * Holds if there is likely a non-empty suffix leading to rejection starting in `s`. + */ + pragma[noopt] + predicate hasEdgeToLikelyRejectable(StateInPumpableRegexp s) { + // all edges (at least one) with some char leads to another state that is rejectable. + // the `next` states might not share a common suffix, which can cause FPs. + exists(string char | char = hasEdgeToLikelyRejectableHelper(s) | + // noopt to force `hasEdgeToLikelyRejectableHelper` to be first in the join-order. + exists(State next | deltaClosedChar(s, char, next) | isLikelyRejectable(next)) and + forall(State next | deltaClosedChar(s, char, next) | isLikelyRejectable(next)) + ) + } + + /** + * Gets a char for there exists a transition away from `s`, + * and `s` has not been found to be rejectable by `hasRejectEdge` or `isRejectState`. + */ + pragma[noinline] + private string hasEdgeToLikelyRejectableHelper(StateInPumpableRegexp s) { + not hasRejectEdge(s) and + not isRejectState(s) and + deltaClosedChar(s, result, _) + } + + /** + * Holds if there is a state `next` that can be reached from `prev` + * along epsilon edges, such that there is a transition from + * `prev` to `next` that the character symbol `char`. + */ + predicate deltaClosedChar(StateInPumpableRegexp prev, string char, StateInPumpableRegexp next) { + deltaClosed(prev, getAnInputSymbolMatchingRelevant(char), next) + } + + pragma[noinline] + InputSymbol getAnInputSymbolMatchingRelevant(string char) { + char = relevant(_) and + result = getAnInputSymbolMatching(char) + } + + /** + * Gets a char used for finding possible suffixes inside `root`. + */ + pragma[noinline] + private string relevant(RegExpRoot root) { + exists(ascii(result)) + or + exists(InputSymbol s | belongsTo(s, root) | result = intersect(s, _)) + or + // The characters from `hasSimpleRejectEdge`. Only `\n` is really needed (as `\n` is not in the `ascii` relation). + // The three chars must be kept in sync with `hasSimpleRejectEdge`. + result = ["|", "\n", "Z"] + } + + /** + * Holds if there exists a `char` such that there is no edge from `s` labeled `char` in our NFA. + * The NFA does not model reject states, so the above is the same as saying there is a reject edge. + */ + private predicate hasRejectEdge(State s) { + hasSimpleRejectEdge(s) + or + not hasSimpleRejectEdge(s) and + exists(string char | char = relevant(getRoot(s.getRepr())) | not deltaClosedChar(s, char, _)) + } + + /** + * Holds if there is no edge from `s` labeled with "|", "\n", or "Z" in our NFA. + * This predicate is used as a cheap pre-processing to speed up `hasRejectEdge`. + */ + private predicate hasSimpleRejectEdge(State s) { + // The three chars were chosen arbitrarily. The three chars must be kept in sync with `relevant`. + exists(string char | char = ["|", "\n", "Z"] | not deltaClosedChar(s, char, _)) + } + + /** + * Gets a state that can be reached from pumpable `fork` consuming all + * chars in `w` any number of times followed by the first `i+1` characters of `w`. 
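+ *
+ * For example, for `w = "ab"`, `process(fork, w, 0)` gets a state reached by
+ * consuming "ab" any number of times followed by "a", and `process(fork, w, 1)`
+ * gets a state reached by additionally consuming "b".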
+ */ + pragma[noopt] + private State process(State fork, string w, int i) { + exists(State prev | prev = getProcessPrevious(fork, i, w) | + exists(string char, InputSymbol sym | + char = w.charAt(i) and + deltaClosed(prev, sym, result) and + // noopt to prevent joining `prev` with all possible `chars` that could transition away from `prev`. + // Instead only join with the set of `chars` where a relevant `InputSymbol` has already been found. + sym = getAProcessInputSymbol(char) + ) + ) + } + + /** + * Gets a state that can be reached from pumpable `fork` consuming all + * chars in `w` any number of times followed by the first `i` characters of `w`. + */ + private State getProcessPrevious(State fork, int i, string w) { + isReDoSCandidate(fork, w) and + ( + i = 0 and result = fork + or + result = process(fork, w, i - 1) + or + // repeat until fixpoint + i = 0 and + result = process(fork, w, w.length() - 1) + ) + } + + /** + * Gets an InputSymbol that matches `char`. + * The predicate is specialized to only have a result for the `char`s that are relevant for the `process` predicate. + */ + private InputSymbol getAProcessInputSymbol(string char) { + char = getAProcessChar() and + result = getAnInputSymbolMatching(char) + } + + /** + * Gets a `char` that occurs in a `pump` string. + */ + private string getAProcessChar() { result = any(string s | isReDoSCandidate(_, s)).charAt(_) } +} + +/** + * Gets the result of backslash-escaping newlines, carriage-returns and + * backslashes in `s`. + */ +bindingset[s] +private string escape(string s) { + result = + s.replaceAll("\\", "\\\\") + .replaceAll("\n", "\\n") + .replaceAll("\r", "\\r") + .replaceAll("\t", "\\t") +} + +/** + * Gets `str` with the last `i` characters moved to the front. + * + * We use this to adjust the pump string to match with the beginning of + * a RegExpTerm, so it doesn't start in the middle of a constant. + */ +bindingset[str, i] +private string rotate(string str, int i) { + result = str.suffix(str.length() - i) + str.prefix(str.length() - i) +} + +/** + * Holds if `term` may cause superlinear backtracking on strings containing many repetitions of `pump`. + * Gets the shortest string that causes superlinear backtracking. + */ +private predicate isReDoSAttackable(RegExpTerm term, string pump, State s) { + exists(int i, string c | s = Match(term, i) | + c = + min(string w | + any(ReDoSConfiguration conf).isReDoSCandidate(s, w) and + SuffixConstruction::reachesOnlyRejectableSuffixes(s, w) + | + w order by w.length(), w + ) and + pump = escape(rotate(c, i)) + ) +} + +/** + * Holds if the state `s` (represented by the term `t`) can have backtracking with repetitions of `pump`. + * + * `prefixMsg` contains a friendly message for a prefix that reaches `s` (or `prefixMsg` is the empty string if the prefix is empty or if no prefix could be found). 
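+ *
+ * For example, for a regular expression like /abc(a+)+$/, `pump` would consist of
+ * repetitions of "a" and `prefixMsg` would describe strings starting with "abc".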
+ */ +predicate hasReDoSResult(RegExpTerm t, string pump, State s, string prefixMsg) { + isReDoSAttackable(t, pump, s) and + ( + prefixMsg = "starting with '" + escape(PrefixConstruction::prefix(s)) + "' and " and + not PrefixConstruction::prefix(s) = "" + or + PrefixConstruction::prefix(s) = "" and prefixMsg = "" + or + not exists(PrefixConstruction::prefix(s)) and prefixMsg = "" + ) +} diff --git a/python/ql/src/semmle/python/regex/RegExpTreeView.qll b/python/ql/src/semmle/python/regex/RegExpTreeView.qll new file mode 100644 index 000000000000..addd192cafaa --- /dev/null +++ b/python/ql/src/semmle/python/regex/RegExpTreeView.qll @@ -0,0 +1,15 @@ +/** + * This module should provide a class hierarchy corresponding to a parse tree of regular expressions. + */ + +import python +import semmle.python.RegexTreeView + +/** + * Holds if the regular expression should not be considered. + * + * For javascript we make the pragmatic performance optimization to ignore files we did not extract. + */ +predicate isExcluded(RegExpParent parent) { + not exists(parent.getRegex().getLocation().getFile().getRelativePath()) +} diff --git a/python/ql/src/semmle/python/regex/SuperlinearBackTracking.qll b/python/ql/src/semmle/python/regex/SuperlinearBackTracking.qll new file mode 100644 index 000000000000..0bbff12b49d7 --- /dev/null +++ b/python/ql/src/semmle/python/regex/SuperlinearBackTracking.qll @@ -0,0 +1,449 @@ +/** + * Provides classes for working with regular expressions that can + * perform backtracking in superlinear time. + */ + +import ReDoSUtil + +/* + * This module implements the analysis described in the paper: + * Valentin Wustholz, Oswaldo Olivo, Marijn J. H. Heule, and Isil Dillig: + * Static Detection of DoS Vulnerabilities in + * Programs that use Regular Expressions + * (Extended Version). + * (https://arxiv.org/pdf/1701.04045.pdf) + * + * Theorem 3 from the paper describes the basic idea. + * + * The following explains the idea using variables and predicate names that are used in the implementation: + * We consider a pair of repetitions, which we will call `pivot` and `succ`. + * + * We create a product automaton of 3-tuples of states (see `StateTuple`). + * There exists a transition `(a,b,c) -> (d,e,f)` in the product automaton + * iff there exists three transitions in the NFA `a->d, b->e, c->f` where those three + * transitions all match a shared character `char`. (see `getAThreewayIntersect`) + * + * We start a search in the product automaton at `(pivot, pivot, succ)`, + * and search for a series of transitions (a `Trace`), such that we end + * at `(pivot, succ, succ)` (see `isReachableFromStartTuple`). + * + * For example, consider the regular expression `/^\d*5\w*$/`. + * The search will start at the tuple `(\d*, \d*, \w*)` and search + * for a path to `(\d*, \w*, \w*)`. + * This path exists, and consists of a single transition in the product automaton, + * where the three corresponding NFA edges all match the character `"5"`. + * + * The start-state in the NFA has an any-transition to itself, this allows us to + * flag regular expressions such as `/a*$/` - which does not have a start anchor - + * and can thus start matching anywhere. + * + * The implementation is not perfect. + * It has the same suffix detection issue as the `js/redos` query, which can cause false positives. + * It also doesn't find all transitions in the product automaton, which can cause false negatives. + */ + +/** + * An instantiaion of `ReDoSConfiguration` for superlinear ReDoS. 
+ */ +class SuperLinearReDoSConfiguration extends ReDoSConfiguration { + SuperLinearReDoSConfiguration() { this = "SuperLinearReDoSConfiguration" } + + override predicate isReDoSCandidate(State state, string pump) { isPumpable(_, state, pump) } +} + +/** + * Gets any root (start) state of a regular expression. + */ +private State getRootState() { result = mkMatch(any(RegExpRoot r)) } + +private newtype TStateTuple = + MkStateTuple(State q1, State q2, State q3) { + // starts at (pivot, pivot, succ) + isStartLoops(q1, q3) and q1 = q2 + or + step(_, _, _, _, q1, q2, q3) and FeasibleTuple::isFeasibleTuple(q1, q2, q3) + } + +/** + * A state in the product automaton. + * The product automaton contains 3-tuples of states. + * + * We lazily only construct those states that we are actually + * going to need. + * Either a start state `(pivot, pivot, succ)`, or a state + * where there exists a transition from an already existing state. + * + * The exponential variant of this query (`js/redos`) uses an optimization + * trick where `q1 <= q2`. This trick cannot be used here as the order + * of the elements matter. + */ +class StateTuple extends TStateTuple { + State q1; + State q2; + State q3; + + StateTuple() { this = MkStateTuple(q1, q2, q3) } + + /** + * Gest a string repesentation of this tuple. + */ + string toString() { result = "(" + q1 + ", " + q2 + ", " + q3 + ")" } + + /** + * Holds if this tuple is `(r1, r2, r3)`. + */ + pragma[noinline] + predicate isTuple(State r1, State r2, State r3) { r1 = q1 and r2 = q2 and r3 = q3 } +} + +/** + * A module for determining feasible tuples for the product automaton. + * + * The implementation is split into many predicates for performance reasons. + */ +private module FeasibleTuple { + /** + * Holds if the tuple `(r1, r2, r3)` might be on path from a start-state to an end-state in the product automaton. + */ + pragma[inline] + predicate isFeasibleTuple(State r1, State r2, State r3) { + // The first element is either inside a repetition (or the start state itself) + isRepetitionOrStart(r1) and + // The last element is inside a repetition + stateInsideRepetition(r3) and + // The states are reachable in the NFA in the order r1 -> r2 -> r3 + delta+(r1) = r2 and + delta+(r2) = r3 and + // The first element can reach a beginning (the "pivot" state in a `(pivot, succ)` pair). + canReachABeginning(r1) and + // The last element can reach a target (the "succ" state in a `(pivot, succ)` pair). + canReachATarget(r3) + } + + /** + * Holds if `s` is either inside a repetition, or is the start state (which is a repetition). + */ + pragma[noinline] + private predicate isRepetitionOrStart(State s) { stateInsideRepetition(s) or s = getRootState() } + + /** + * Holds if state `s` might be inside a backtracking repetition. + */ + pragma[noinline] + private predicate stateInsideRepetition(State s) { + s.getRepr().getParent*() instanceof InfiniteRepetitionQuantifier + } + + /** + * Holds if there exists a path in the NFA from `s` to a "pivot" state + * (from a `(pivot, succ)` pair that starts the search). + */ + pragma[noinline] + private predicate canReachABeginning(State s) { + delta+(s) = any(State pivot | isStartLoops(pivot, _)) + } + + /** + * Holds if there exists a path in the NFA from `s` to a "succ" state + * (from a `(pivot, succ)` pair that starts the search). 
+ */ + pragma[noinline] + private predicate canReachATarget(State s) { delta+(s) = any(State succ | isStartLoops(_, succ)) } +} + +/** + * Holds if `pivot` and `succ` are a pair of loops that could be the beginning of a quadratic blowup. + * + * There is a slight implementation difference compared to the paper: this predicate requires that `pivot != succ`. + * The case where `pivot = succ` causes exponential backtracking and is handled by the `js/redos` query. + */ +predicate isStartLoops(State pivot, State succ) { + pivot != succ and + succ.getRepr() instanceof InfiniteRepetitionQuantifier and + delta+(pivot) = succ and + ( + pivot.getRepr() instanceof InfiniteRepetitionQuantifier + or + pivot = mkMatch(any(RegExpRoot root)) + ) +} + +/** + * Gets a state for which there exists a transition in the NFA from `s'. + */ +State delta(State s) { delta(s, _, result) } + +/** + * Holds if there are transitions from the components of `q` to the corresponding + * components of `r` labelled with `s1`, `s2`, and `s3`, respectively. + */ +pragma[noinline] +predicate step(StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, StateTuple r) { + exists(State r1, State r2, State r3 | + step(q, s1, s2, s3, r1, r2, r3) and r = MkStateTuple(r1, r2, r3) + ) +} + +/** + * Holds if there are transitions from the components of `q` to `r1`, `r2`, and `r3 + * labelled with `s1`, `s2`, and `s3`, respectively. + */ +pragma[noopt] +predicate step( + StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, State r1, State r2, State r3 +) { + exists(State q1, State q2, State q3 | q.isTuple(q1, q2, q3) | + deltaClosed(q1, s1, r1) and + deltaClosed(q2, s2, r2) and + deltaClosed(q3, s3, r3) and + // use noopt to force the join on `getAThreewayIntersect` to happen last. + exists(getAThreewayIntersect(s1, s2, s3)) + ) +} + +/** + * Gets a char that is matched by all the edges `s1`, `s2`, and `s3`. + * + * The result is not complete, and might miss some combination of edges that share some character. + */ +pragma[noinline] +string getAThreewayIntersect(InputSymbol s1, InputSymbol s2, InputSymbol s3) { + result = minAndMaxIntersect(s1, s2) and result = [intersect(s2, s3), intersect(s1, s3)] + or + result = minAndMaxIntersect(s1, s3) and result = [intersect(s2, s3), intersect(s1, s2)] + or + result = minAndMaxIntersect(s2, s3) and result = [intersect(s1, s2), intersect(s1, s3)] +} + +/** + * Gets the minimum and maximum characters that intersect between `a` and `b`. + * This predicate is used to limit the size of `getAThreewayIntersect`. + */ +pragma[noinline] +string minAndMaxIntersect(InputSymbol a, InputSymbol b) { + result = [min(intersect(a, b)), max(intersect(a, b))] +} + +private newtype TTrace = + Nil() or + Step(InputSymbol s1, InputSymbol s2, InputSymbol s3, TTrace t) { + exists(StateTuple p | + isReachableFromStartTuple(_, _, p, t, _) and + step(p, s1, s2, s3, _) + ) + or + exists(State pivot, State succ | isStartLoops(pivot, succ) | + t = Nil() and step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, _) + ) + } + +/** + * A list of tuples of input symbols that describe a path in the product automaton + * starting from some start state. + */ +class Trace extends TTrace { + /** + * Gets a string representation of this Trace that can be used for debug purposes. 
+ */ + string toString() { + this = Nil() and result = "Nil()" + or + exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t | this = Step(s1, s2, s3, t) | + result = "Step(" + s1 + ", " + s2 + ", " + s3 + ", " + t + ")" + ) + } +} + +/** + * Gets a string corresponding to the trace `t`. + */ +string concretise(Trace t) { + t = Nil() and result = "" + or + exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace rest | t = Step(s1, s2, s3, rest) | + result = concretise(rest) + getAThreewayIntersect(s1, s2, s3) + ) +} + +/** + * Holds if there exists a transition from `r` to `q` in the product automaton. + * Notice that the arguments are flipped, and thus the direction is backwards. + */ +pragma[noinline] +predicate tupleDeltaBackwards(StateTuple q, StateTuple r) { step(r, _, _, _, q) } + +/** + * Holds if `tuple` is an end state in our search. + * That means there exists a pair of loops `(pivot, succ)` such that `tuple = (pivot, succ, succ)`. + */ +predicate isEndTuple(StateTuple tuple) { tuple = getAnEndTuple(_, _) } + +/** + * Gets the minimum length of a path from `r` to some an end state `end`. + * + * The implementation searches backwards from the end-tuple. + * This approach was chosen because it is way more efficient if the first predicate given to `shortestDistances` is small. + * The `end` argument must always be an end state. + */ +int distBackFromEnd(StateTuple r, StateTuple end) = + shortestDistances(isEndTuple/1, tupleDeltaBackwards/2)(end, r, result) + +/** + * Holds if there exists a pair of repetitions `(pivot, succ)` in the regular expression such that: + * `tuple` is reachable from `(pivot, pivot, succ)` in the product automaton, + * and there is a distance of `dist` from `tuple` to the nearest end-tuple `(pivot, succ, succ)`, + * and a path from a start-state to `tuple` follows the transitions in `trace`. + */ +predicate isReachableFromStartTuple(State pivot, State succ, StateTuple tuple, Trace trace, int dist) { + // base case. The first step is inlined to start the search after all possible 1-steps, and not just the ones with the shortest path. + exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, State q1, State q2, State q3 | + isStartLoops(pivot, succ) and + step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, tuple) and + tuple = MkStateTuple(q1, q2, q3) and + trace = Step(s1, s2, s3, Nil()) and + dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ)) + ) + or + // recursive case + exists(StateTuple p, Trace v, InputSymbol s1, InputSymbol s2, InputSymbol s3 | + isReachableFromStartTuple(pivot, succ, p, v, dist + 1) and + dist = isReachableFromStartTupleHelper(pivot, succ, tuple, p, s1, s2, s3) and + trace = Step(s1, s2, s3, v) + ) +} + +/** + * Helper predicate for the recursive case in `isReachableFromStartTuple`. + */ +pragma[noinline] +private int isReachableFromStartTupleHelper( + State pivot, State succ, StateTuple r, StateTuple p, InputSymbol s1, InputSymbol s2, + InputSymbol s3 +) { + result = distBackFromEnd(r, MkStateTuple(pivot, succ, succ)) and + step(p, s1, s2, s3, r) +} + +/** + * Gets the tuple `(pivot, succ, succ)` from the product automaton. + */ +StateTuple getAnEndTuple(State pivot, State succ) { + isStartLoops(pivot, succ) and + result = MkStateTuple(pivot, succ, succ) +} + +/** + * Holds if matching repetitions of `pump` can: + * 1) Transition from `pivot` back to `pivot`. + * 2) Transition from `pivot` to `succ`. + * 3) Transition from `succ` to `succ`. 
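+ *
+ * For the example /^\d*5\w*$/ from the module comment, `pivot` is the state for
+ * `\d*`, `succ` is the state for `\w*`, and `pump` is "5".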
+ * + * From theorem 3 in the paper linked in the top of this file we can therefore conclude that + * the regular expression has polynomial backtracking - if a rejecting suffix exists. + * + * This predicate is used by `SuperLinearReDoSConfiguration`, and the final results are + * available in the `hasReDoSResult` predicate. + */ +predicate isPumpable(State pivot, State succ, string pump) { + exists(StateTuple q, Trace t | + isReachableFromStartTuple(pivot, succ, q, t, _) and + q = getAnEndTuple(pivot, succ) and + pump = concretise(t) + ) +} + +/** + * Holds if repetitions of `pump` at `t` will cause polynomial backtracking. + */ +predicate polynimalReDoS(RegExpTerm t, string pump, string prefixMsg, RegExpTerm prev) { + exists(State s, State pivot | + hasReDoSResult(t, pump, s, prefixMsg) and + isPumpable(pivot, s, _) and + prev = pivot.getRepr() + ) +} + +/** + * Holds if `t` matches at least an epsilon symbol. + * + * That is, this term does not restrict the language of the enclosing regular expression. + * + * This is implemented as an under-approximation, and this predicate does not hold for sub-patterns in particular. + */ +private predicate matchesEpsilon(RegExpTerm t) { + t instanceof RegExpStar + or + t instanceof RegExpOpt + or + t.(RegExpRange).getLowerBound() = 0 + or + exists(RegExpTerm child | + child = t.getAChild() and + matchesEpsilon(child) + | + t instanceof RegExpAlt or + t instanceof RegExpGroup or + t instanceof RegExpPlus or + t instanceof RegExpRange + ) + or + matchesEpsilon(t.(RegExpBackRef).getGroup()) + or + forex(RegExpTerm child | child = t.(RegExpSequence).getAChild() | matchesEpsilon(child)) +} + +/** + * Gets a message for why `term` can cause polynomial backtracking. + */ +string getReasonString(RegExpTerm term, string pump, string prefixMsg, RegExpTerm prev) { + polynimalReDoS(term, pump, prefixMsg, prev) and + result = + "Strings " + prefixMsg + "with many repetitions of '" + pump + + "' can start matching anywhere after the start of the preceeding " + prev +} + +/** + * A term that may cause a regular expression engine to perform a + * polynomial number of match attempts, relative to the input length. + */ +class PolynomialBackTrackingTerm extends InfiniteRepetitionQuantifier { + string reason; + string pump; + string prefixMsg; + RegExpTerm prev; + + PolynomialBackTrackingTerm() { + reason = getReasonString(this, pump, prefixMsg, prev) and + // there might be many reasons for this term to have polynomial backtracking - we pick the shortest one. + reason = min(string msg | msg = getReasonString(this, _, _, _) | msg order by msg.length(), msg) + } + + /** + * Holds if all non-empty successors to the polynomial backtracking term matches the end of the line. + */ + predicate isAtEndLine() { + forall(RegExpTerm succ | this.getSuccessor+() = succ and not matchesEpsilon(succ) | + succ instanceof RegExpDollar + ) + } + + /** + * Gets the string that should be repeated to cause this regular expression to perform polynomially. + */ + string getPumpString() { result = pump } + + /** + * Gets a message for which prefix a matching string must start with for this term to cause polynomial backtracking. + */ + string getPrefixMessage() { result = prefixMsg } + + /** + * Gets a predecessor to `this`, which also loops on the pump string, and thereby causes polynomial backtracking. + */ + RegExpTerm getPreviousLoop() { result = prev } + + /** + * Gets the reason for the number of match attempts. 
+ */ + string getReason() { result = reason } +} diff --git a/python/ql/src/semmle/python/security/dataflow/PolynomialReDoS.qll b/python/ql/src/semmle/python/security/dataflow/PolynomialReDoS.qll new file mode 100644 index 000000000000..eace61442eda --- /dev/null +++ b/python/ql/src/semmle/python/security/dataflow/PolynomialReDoS.qll @@ -0,0 +1,177 @@ +/** + * Provides a taint-tracking configuration for detecting polynomial regular expression denial of service (ReDoS) + * vulnerabilities. + */ + +import python +import semmle.python.dataflow.new.DataFlow +import semmle.python.dataflow.new.DataFlow2 +import semmle.python.dataflow.new.TaintTracking +import semmle.python.Concepts +import semmle.python.dataflow.new.RemoteFlowSources +import semmle.python.dataflow.new.BarrierGuards +import semmle.python.RegexTreeView +import semmle.python.ApiGraphs + +/** A configuration for finding uses of compiled regexes. */ +class RegexDefinitionConfiguration extends DataFlow2::Configuration { + RegexDefinitionConfiguration() { this = "RegexDefinitionConfiguration" } + + override predicate isSource(DataFlow::Node source) { source instanceof RegexDefinitonSource } + + override predicate isSink(DataFlow::Node sink) { sink instanceof RegexDefinitionSink } +} + +/** A regex compilation. */ +class RegexDefinitonSource extends DataFlow::CallCfgNode { + DataFlow::Node regexNode; + + RegexDefinitonSource() { + this = API::moduleImport("re").getMember("compile").getACall() and + regexNode in [this.getArg(0), this.getArgByName("pattern")] + } + + /** Gets the regex that is being compiled by this node. */ + RegExpTerm getRegExp() { result.getRegex() = regexNode.asExpr() and result.isRootTerm() } + + /** Gets the data flow node for the regex being compiled by this node. */ + DataFlow::Node getRegexNode() { result = regexNode } +} + +/** A use of a compiled regex. */ +class RegexDefinitionSink extends DataFlow::Node { + RegexExecutionMethod method; + DataFlow::CallCfgNode executingCall; + + RegexDefinitionSink() { + exists(DataFlow::AttrRead reMethod | + executingCall.getFunction() = reMethod and + reMethod.getAttributeName() = method and + this = reMethod.getObject() + ) + } + + /** Gets the method used to execute the regex. */ + RegexExecutionMethod getMethod() { result = method } + + /** Gets the data flow node for the executing call. */ + DataFlow::CallCfgNode getExecutingCall() { result = executingCall } +} + +/** + * A taint-tracking configuration for detecting regular expression denial-of-service vulnerabilities. + */ +class PolynomialReDoSConfiguration extends TaintTracking::Configuration { + PolynomialReDoSConfiguration() { this = "PolynomialReDoSConfiguration" } + + override predicate isSource(DataFlow::Node source) { source instanceof RemoteFlowSource } + + override predicate isSink(DataFlow::Node sink) { sink instanceof PolynomialReDoSSink } +} + +/** A data flow node executing a regex. */ +abstract class RegexExecution extends DataFlow::Node { + /** Gets the data flow node for the regex being compiled by this node. */ + abstract DataFlow::Node getRegexNode(); + + /** Gets a dataflow node for the string to be searched or matched against. */ + abstract DataFlow::Node getString(); +} + +private class RegexExecutionMethod extends string { + RegexExecutionMethod() { + this in ["match", "fullmatch", "search", "split", "findall", "finditer", "sub", "subn"] + } +} + +/** Gets the index of the argument representing the string to be searched by a regex. 
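+ * For example, in `re.match(pattern, string)` the string is argument 1, while in
+ * `re.sub(pattern, repl, string)` it is argument 2.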
*/ +int stringArg(RegexExecutionMethod method) { + method in ["match", "fullmatch", "search", "split", "findall", "finditer"] and + result = 1 + or + method in ["sub", "subn"] and + result = 2 +} + +/** + * A class to find `re` methods immediately executing an expression. + * + * See `RegexExecutionMethods` + */ +class DirectRegex extends DataFlow::CallCfgNode, RegexExecution { + RegexExecutionMethod method; + + DirectRegex() { this = API::moduleImport("re").getMember(method).getACall() } + + override DataFlow::Node getRegexNode() { + result in [this.getArg(0), this.getArgByName("pattern")] + } + + override DataFlow::Node getString() { + result in [this.getArg(stringArg(method)), this.getArgByName("string")] + } +} + +/** + * A class to find `re` methods immediately executing a compiled expression by `re.compile`. + * + * Given the following example: + * + * ```py + * pattern = re.compile(input) + * pattern.match(s) + * ``` + * + * This class will identify that `re.compile` compiles `input` and afterwards + * executes `re`'s `match`. As a result, `this` will refer to `pattern.match(s)` + * and `this.getRegexNode()` will return the node for `input` (`re.compile`'s first argument) + * + * + * See `RegexExecutionMethods` + * + * See https://docs.python.org/3/library/re.html#regular-expression-objects + */ +private class CompiledRegex extends DataFlow::CallCfgNode, RegexExecution { + DataFlow::Node regexNode; + RegexExecutionMethod method; + + CompiledRegex() { + exists( + RegexDefinitionConfiguration conf, RegexDefinitonSource source, RegexDefinitionSink sink + | + conf.hasFlow(source, sink) and + regexNode = source.getRegexNode() and + method = sink.getMethod() and + this = sink.getExecutingCall() + ) + } + + override DataFlow::Node getRegexNode() { result = regexNode } + + override DataFlow::Node getString() { + result in [this.getArg(stringArg(method) - 1), this.getArgByName("string")] + } +} + +/** + * A data flow sink node for polynomial regular expression denial-of-service vulnerabilities. + */ +class PolynomialReDoSSink extends DataFlow::Node { + RegExpTerm t; + + PolynomialReDoSSink() { + exists(RegexExecution re | + re.getRegexNode().asExpr() = t.getRegex() and + this = re.getString() + ) and + t.isRootTerm() + } + + /** Gets the regex that is being executed by this node. */ + RegExpTerm getRegExp() { result = t } + + /** + * Gets the node to highlight in the alert message. 
+ */ + DataFlow::Node getHighlight() { result = this } +} From 40ac91eecdc93ca938693fe27c579c43d860e2f4 Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Mon, 28 Jun 2021 17:02:40 +0200 Subject: [PATCH 08/22] Python: Add some tests for exponential ReDoS - `KnownCVEs` contain the currently triaged Python CVEs - `unittest.py` contains some tests constructed by @erik-krogh - `redos.py` contains a port of `tst.js` from javascript The expected file has been ported as well with some fixups by @tausbn --- .../query-tests/Security/CWE-730/KnownCVEs.py | 94 +++++ .../Security/CWE-730/ReDoS.expected | 98 +++++ .../query-tests/Security/CWE-730/ReDoS.qlref | 1 + .../query-tests/Security/CWE-730/redos.py | 368 ++++++++++++++++++ .../query-tests/Security/CWE-730/unittests.py | 9 + 5 files changed, 570 insertions(+) create mode 100644 python/ql/test/query-tests/Security/CWE-730/KnownCVEs.py create mode 100644 python/ql/test/query-tests/Security/CWE-730/ReDoS.expected create mode 100644 python/ql/test/query-tests/Security/CWE-730/ReDoS.qlref create mode 100644 python/ql/test/query-tests/Security/CWE-730/redos.py create mode 100644 python/ql/test/query-tests/Security/CWE-730/unittests.py diff --git a/python/ql/test/query-tests/Security/CWE-730/KnownCVEs.py b/python/ql/test/query-tests/Security/CWE-730/KnownCVEs.py new file mode 100644 index 000000000000..14a8ff528090 --- /dev/null +++ b/python/ql/test/query-tests/Security/CWE-730/KnownCVEs.py @@ -0,0 +1,94 @@ +import re + +# linear +# https://github.com/github/codeql-python-CVE-coverage/issues/439 +rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)') + +# https://github.com/github/codeql-python-CVE-coverage/issues/402 +whitespace = br"[\000\011\012\014\015\040]" +whitespace_optional = whitespace + b"*" +newline_only = br"[\r\n]+" +newline = whitespace_optional + newline_only + whitespace_optional +toFlag = re.compile(newline) + +# https://github.com/github/codeql-python-CVE-coverage/issues/400 +re.compile(r'[+-]?(\d+)*\.\d+%?') +re.compile(r'"""\s+(?:.|\n)*?\s+"""') +re.compile(r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)') +re.compile(r'".*``.*``.*"') +re.compile(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)') +re.compile(r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)') +re.compile(r'(%new)(\s*)(\()(\s*.*?\s*)(\))') +re.compile(r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?') +re.compile(r'(\.\w+\b)(\s*=\s*)([^;]*)(\s*;)') + +# linear +# https://github.com/github/codeql-python-CVE-coverage/issues/392 +simple_email_re = re.compile(r"^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$") + +# https://github.com/github/codeql-python-CVE-coverage/issues/249 +rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+' + 'realm=(["\']?)([^"\']*)\\2', re.I) + +# https://github.com/github/codeql-python-CVE-coverage/issues/248 +gauntlet = re.compile( + r"""^([-/:,#%.'"\s!\w]|\w-\w|'[\s\w]+'\s*|"[\s\w]+"|\([\d,%\.\s]+\))*$""", + flags=re.U + ) + +# https://github.com/github/codeql-python-CVE-coverage/issues/227 +# from .compat import tobytes + +WS = "[ \t]" +OWS = WS + "{0,}?" + +# RFC 7230 Section 3.2.6 "Field Value Components": +# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" +# / "+" / "-" / "." 
/ "^" / "_" / "`" / "|" / "~" +# / DIGIT / ALPHA +# obs-text = %x80-FF +TCHAR = r"[!#$%&'*+\-.^_`|~0-9A-Za-z]" +OBS_TEXT = r"\x80-\xff" +TOKEN = TCHAR + "{1,}" +# RFC 5234 Appendix B.1 "Core Rules": +# VCHAR = %x21-7E +# ; visible (printing) characters +VCHAR = r"\x21-\x7e" +# header-field = field-name ":" OWS field-value OWS +# field-name = token +# field-value = *( field-content / obs-fold ) +# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +# field-vchar = VCHAR / obs-text +# Errata from: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189 +# changes field-content to: +# +# field-content = field-vchar [ 1*( SP / HTAB / field-vchar ) +# field-vchar ] + +FIELD_VCHAR = "[" + VCHAR + OBS_TEXT + "]" +FIELD_CONTENT = FIELD_VCHAR + "([ \t" + VCHAR + OBS_TEXT + "]+" + FIELD_VCHAR + "){,1}" +FIELD_VALUE = "(" + FIELD_CONTENT + "){0,}" + +HEADER_FIELD = re.compile( + # tobytes( + "^(?P" + TOKEN + "):" + OWS + "(?P" + FIELD_VALUE + ")" + OWS + "$" + # ) + ) + +# https://github.com/github/codeql-python-CVE-coverage/issues/224 +pattern = re.compile( + r'^(:?(([a-zA-Z]{1})|([a-zA-Z]{1}[a-zA-Z]{1})|' # domain pt.1 + r'([a-zA-Z]{1}[0-9]{1})|([0-9]{1}[a-zA-Z]{1})|' # domain pt.2 + r'([a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]))\.)+' # domain pt.3 + r'([a-zA-Z]{2,13}|(xn--[a-zA-Z0-9]{2,30}))$' # TLD +) + +# https://github.com/github/codeql-python-CVE-coverage/issues/189 +URL_REGEX = ( + r'(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|' + r'[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|' + r'(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|' + r'[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))' # "emacs! +) + +url = re.compile(URL_REGEX) diff --git a/python/ql/test/query-tests/Security/CWE-730/ReDoS.expected b/python/ql/test/query-tests/Security/CWE-730/ReDoS.expected new file mode 100644 index 000000000000..1286abcc0a70 --- /dev/null +++ b/python/ql/test/query-tests/Security/CWE-730/ReDoS.expected @@ -0,0 +1,98 @@ +| KnownCVEs.py:15:22:15:24 | \\d+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '9'. | +| KnownCVEs.py:30:24:31:25 | .* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ','. | +| KnownCVEs.py:35:18:35:81 | ([-/:,#%.'"\\s!\\w]\|\\w-\\w\|'[\\s\\w]+'\\s*\|"[\\s\\w]+"\|\\([\\d,%\\.\\s]+\\))* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '"\\t"'. | +| redos.py:6:28:6:42 | (?:__\|[\\s\\S])+? | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '__'. | +| redos.py:6:52:6:68 | (?:\\*\\*\|[\\s\\S])+? | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '**'. | +| redos.py:21:34:21:53 | (?:[^"\\\\]\|\\\\\\\\\|\\\\.)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\\\\\\\'. | +| redos.py:21:57:21:76 | (?:[^'\\\\]\|\\\\\\\\\|\\\\.)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\\\\\\\'. | +| redos.py:21:81:21:100 | (?:[^)\\\\]\|\\\\\\\\\|\\\\.)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\\\\\\\'. 
| +| redos.py:33:64:33:65 | .* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\|\|\\n'. | +| redos.py:38:33:38:42 | (\\\\\\/\|.)*? | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\\\/'. | +| redos.py:43:37:43:38 | .* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '#'. | +| redos.py:49:41:49:43 | .*? | This part of the regular expression may cause exponential backtracking on strings starting with '"' and containing many repetitions of '""'. | +| redos.py:49:47:49:49 | .*? | This part of the regular expression may cause exponential backtracking on strings starting with ''' and containing many repetitions of ''''. | +| redos.py:54:47:54:49 | .*? | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ']['. | +| redos.py:54:80:54:82 | .*? | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ']['. | +| redos.py:60:25:60:30 | [a-z]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:61:25:61:30 | [a-z]* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:62:53:62:64 | [a-zA-Z0-9]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. | +| redos.py:63:26:63:33 | ([a-z])+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'aa'. | +| redos.py:68:26:68:41 | [\\w#:.~>+()\\s-]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\t'. | +| redos.py:68:48:68:50 | .*? | This part of the regular expression may cause exponential backtracking on strings starting with '[' and containing many repetitions of ']['. | +| redos.py:73:29:73:36 | (\\\\?.)*? | This part of the regular expression may cause exponential backtracking on strings starting with '"' and containing many repetitions of '\\\\a'. | +| redos.py:76:24:76:31 | (b\|a?b)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'b'. | +| redos.py:79:24:79:31 | (a\|aa?)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:91:24:91:31 | (a\|aa?)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:97:25:97:38 | ([\\s\\S]\|[^a])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '`'. | +| redos.py:103:25:103:33 | (.\|[^a])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '`'. | +| redos.py:109:25:109:33 | (b\|[^a])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'b'. | +| redos.py:112:25:112:33 | (G\|[^a])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'G'. 
| +| redos.py:115:25:115:37 | ([0-9]\|[^a])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. | +| redos.py:127:25:127:38 | ([a-z]\|[d-h])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'd'. | +| redos.py:130:25:130:40 | ([^a-z]\|[^0-9])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '/'. | +| redos.py:133:25:133:35 | (\\d\|[0-9])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. | +| redos.py:136:25:136:32 | (\\s\|\\s)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' '. | +| redos.py:139:25:139:31 | (\\w\|G)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'G'. | +| redos.py:145:25:145:32 | (\\d\|\\w)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. | +| redos.py:148:25:148:31 | (\\d\|5)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '5'. | +| redos.py:151:25:151:34 | (\\s\|[\\f])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\u000c'. | +| redos.py:157:25:157:34 | (\\f\|[\\f])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\u000c'. | +| redos.py:160:25:160:32 | (\\W\|\\D)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' '. | +| redos.py:163:25:163:32 | (\\S\|\\w)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. | +| redos.py:166:25:166:34 | (\\S\|[\\w])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. | +| redos.py:169:25:169:37 | (1s\|[\\da-z])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '1s'. | +| redos.py:172:25:172:33 | (0\|[\\d])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. | +| redos.py:175:26:175:30 | [\\d]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. | +| redos.py:187:26:187:31 | [^>a]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '='. | +| redos.py:190:27:190:29 | \\s* | This part of the regular expression may cause exponential backtracking on strings starting with '\\n' and containing many repetitions of '\\n'. | +| redos.py:193:28:193:30 | \\s+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' '. | +| redos.py:196:78:196:89 | [ a-zA-Z{}]+ | This part of the regular expression may cause exponential backtracking on strings starting with '{[A(A)A:' and containing many repetitions of ' A:'. | +| redos.py:196:91:196:92 | ,? | This part of the regular expression may cause exponential backtracking on strings starting with '{[A(A)A: ' and containing many repetitions of ',A: '. 
| +| redos.py:199:25:199:26 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:199:28:199:29 | b+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'b'. | +| redos.py:202:26:202:32 | (a+a?)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:202:27:202:28 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:205:25:205:26 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:211:25:211:26 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:217:25:217:27 | \\n+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\n'. | +| redos.py:220:25:220:29 | [^X]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'W'. | +| redos.py:223:30:223:30 | b | This part of the regular expression may cause exponential backtracking on strings starting with 'W' and containing many repetitions of 'bW'. | +| redos.py:229:30:229:30 | b | This part of the regular expression may cause exponential backtracking on strings starting with 'W' and containing many repetitions of 'bW'. | +| redos.py:241:27:241:27 | b | This part of the regular expression may cause exponential backtracking on strings starting with 'a' and containing many repetitions of 'ba'. | +| redos.py:247:25:247:31 | [\\n\\s]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\n'. | +| redos.py:256:25:256:27 | \\w* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz'. | +| redos.py:256:37:256:39 | \\w* | This part of the regular expression may cause exponential backtracking on strings starting with 'foobarbaz' and containing many repetitions of 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz'. | +| redos.py:256:49:256:51 | \\w* | This part of the regular expression may cause exponential backtracking on strings starting with 'foobarbazfoobarbaz' and containing many repetitions of 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz'. | +| redos.py:256:61:256:63 | \\w* | This part of the regular expression may cause exponential backtracking on strings starting with 'foobarbazfoobarbazfoobarbaz' and containing many repetitions of 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz'. | +| redos.py:259:24:259:126 | (.thisisagoddamnlongstringforstresstestingthequery\|\\sthisisagoddamnlongstringforstresstestingthequery)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' thisisagoddamnlongstringforstresstestingthequery'. | +| redos.py:262:24:262:87 | (thisisagoddamnlongstringforstresstestingthequery\|this\\w+query)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'thisisagoddamnlongstringforstresstestingthequery'. 
| +| redos.py:262:78:262:80 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with 'this' and containing many repetitions of 'aquerythis'. | +| redos.py:274:31:274:32 | b+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'b'. | +| redos.py:277:48:277:50 | \\s* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '"" a='. | +| redos.py:283:26:283:27 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:286:26:286:27 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:292:26:292:27 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:295:35:295:36 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:301:100:301:101 | e+ | This part of the regular expression may cause exponential backtracking on strings starting with ';00000000000000' and containing many repetitions of 'e'. | +| redos.py:304:28:304:29 | c+ | This part of the regular expression may cause exponential backtracking on strings starting with 'ab' and containing many repetitions of 'c'. | +| redos.py:307:28:307:30 | \\s+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' '. | +| redos.py:310:26:310:34 | ([^/]\|X)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'X'. | +| redos.py:313:30:313:34 | [^Y]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'Xx'. | +| redos.py:316:25:316:26 | a* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:319:28:319:33 | [\\w-]* | This part of the regular expression may cause exponential backtracking on strings starting with 'foo' and containing many repetitions of '-'. | +| redos.py:322:25:322:29 | (ab)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'ab'. | +| redos.py:325:24:325:30 | (a?a?)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:334:24:334:32 | (?:a\|a?)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:340:27:340:55 | (([a-c]\|[c-d])T(e?e?e?e?\|X))+ | This part of the regular expression may cause exponential backtracking on strings starting with 'PRE' and containing many repetitions of 'cTX'. | +| redos.py:343:26:343:29 | (a)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'aa'. | +| redos.py:346:26:346:27 | b+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'bb'. | +| redos.py:352:25:352:26 | a* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. 
| +| redos.py:353:25:353:26 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:354:25:354:26 | a* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:355:25:355:26 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. | +| redos.py:362:25:362:40 | ((?:a{\|-)\|\\w\\{)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a{'. | +| redos.py:363:25:363:43 | ((?:a{0\|-)\|\\w\\{\\d)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a{0'. | +| redos.py:364:25:364:45 | ((?:a{0,\|-)\|\\w\\{\\d,)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a{0,'. | +| redos.py:365:25:365:48 | ((?:a{0,2\|-)\|\\w\\{\\d,\\d)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a{0,2'. | +| unittests.py:5:17:5:23 | (\u00c6\|\\\u00c6)+ | This part of the regular expression may cause exponential backtracking on strings starting with 'X' and containing many repetitions of '\u00c6'. | +| unittests.py:9:16:9:24 | (?:.\|\\n)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\n'. | diff --git a/python/ql/test/query-tests/Security/CWE-730/ReDoS.qlref b/python/ql/test/query-tests/Security/CWE-730/ReDoS.qlref new file mode 100644 index 000000000000..4c19d395edbe --- /dev/null +++ b/python/ql/test/query-tests/Security/CWE-730/ReDoS.qlref @@ -0,0 +1 @@ +Security/CWE-730/ReDoS.ql diff --git a/python/ql/test/query-tests/Security/CWE-730/redos.py b/python/ql/test/query-tests/Security/CWE-730/redos.py new file mode 100644 index 000000000000..266cb210bd3b --- /dev/null +++ b/python/ql/test/query-tests/Security/CWE-730/redos.py @@ -0,0 +1,368 @@ +import re + +# NOT GOOD; attack: "_" + "__".repeat(100) +# Adapted from marked (https://github.com/markedjs/marked), which is licensed +# under the MIT license; see file marked-LICENSE. +bad1 = re.compile(r'''^\b_((?:__|[\s\S])+?)_\b|^\*((?:\*\*|[\s\S])+?)\*(?!\*)''') + +# GOOD +# Adapted from marked (https://github.com/markedjs/marked), which is licensed +# under the MIT license; see file marked-LICENSE. +good1 = re.compile(r'^\b_((?:__|[^_])+?)_\b|^\*((?:\*\*|[^*])+?)\*(?!\*)') + +# GOOD - there is no witness in the end that could cause the regexp to not match +# Adapted from brace-expansion (https://github.com/juliangruber/brace-expansion), +# which is licensed under the MIT license; see file brace-expansion-LICENSE. +good2 = re.compile(r'(.*,)+.+') + +# NOT GOOD; attack: " '" + "\\\\".repeat(100) +# Adapted from CodeMirror (https://github.com/codemirror/codemirror), +# which is licensed under the MIT license; see file CodeMirror-LICENSE. +bad2 = re.compile(r'''^(?:\s+(?:"(?:[^"\\]|\\\\|\\.)+"|'(?:[^'\\]|\\\\|\\.)+'|\((?:[^)\\]|\\\\|\\.)+\)))?''') + +# GOOD +# Adapted from lulucms2 (https://github.com/yiifans/lulucms2). +good2 = re.compile(r'''\(\*(?:[\s\S]*?\(\*[\s\S]*?\*\))*[\s\S]*?\*\)''') + +# GOOD +# Adapted from jest (https://github.com/facebook/jest), which is licensed +# under the MIT license; see file jest-LICENSE. 
+good3 = re.compile(r'''^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*''') + +# NOT GOOD, variant of good3; attack: "a|\n:|\n" + "||\n".repeat(100) +bad4 = re.compile(r'''^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)a''') + +# NOT GOOD; attack: "/" + "\\/a".repeat(100) +# Adapted from ANodeBlog (https://github.com/gefangshuai/ANodeBlog), +# which is licensed under the Apache License 2.0; see file ANodeBlog-LICENSE. +bad5 = re.compile(r'''\/(?![ *])(\\\/|.)*?\/[gim]*(?=\W|$)''') + +# NOT GOOD; attack: "##".repeat(100) + "\na" +# Adapted from CodeMirror (https://github.com/codemirror/codemirror), +# which is licensed under the MIT license; see file CodeMirror-LICENSE. +bad6 = re.compile(r'''^([\s\[\{\(]|#.*)*$''') + +# GOOD +good4 = re.compile(r'''(\r\n|\r|\n)+''') + +# BAD - PoC: `node -e "/((?:[^\"\']|\".*?\"|\'.*?\')*?)([(,)]|$)/.test(\"'''''''''''''''''''''''''''''''''''''''''''''\\\"\");"`. It's complicated though, because the regexp still matches something, it just matches the empty-string after the attack string. +actuallyBad = re.compile(r'''((?:[^"']|".*?"|'.*?')*?)([(,)]|$)''') + +# NOT GOOD; attack: "a" + "[]".repeat(100) + ".b\n" +# Adapted from Knockout (https://github.com/knockout/knockout), which is +# licensed under the MIT license; see file knockout-LICENSE +bad6 = re.compile(r'''^[\_$a-z][\_$a-z0-9]*(\[.*?\])*(\.[\_$a-z][\_$a-z0-9]*(\[.*?\])*)*$''') + +# GOOD +good6 = re.compile(r'''(a|.)*''') + +# Testing the NFA - only some of the below are detected. +bad7 = re.compile(r'''^([a-z]+)+$''') +bad8 = re.compile(r'''^([a-z]*)*$''') +bad9 = re.compile(r'''^([a-zA-Z0-9])(([\\-.]|[_]+)?([a-zA-Z0-9]+))*(@){1}[a-z0-9]+[.]{1}(([a-z]{2,3})|([a-z]{2,3}[.]{1}[a-z]{2,3}))$''') +bad10 = re.compile(r'''^(([a-z])+.)+[A-Z]([a-z])+$''') + +# NOT GOOD; attack: "[" + "][".repeat(100) + "]!" +# Adapted from Prototype.js (https://github.com/prototypejs/prototype), which +# is licensed under the MIT license; see file Prototype.js-LICENSE. +bad11 = re.compile(r'''(([\w#:.~>+()\s-]+|\*|\[.*?\])+)\s*(,|$)''') + +# NOT GOOD; attack: "'" + "\\a".repeat(100) + '"' +# Adapted from Prism (https://github.com/PrismJS/prism), which is licensed +# under the MIT license; see file Prism-LICENSE. +bad12 = re.compile(r'''("|')(\\?.)*?\1''') + +# NOT GOOD +bad13 = re.compile(r'''(b|a?b)*c''') + +# NOT GOOD +bad15 = re.compile(r'''(a|aa?)*b''') + +# GOOD +good7 = re.compile(r'''(.|\n)*!''') + +# NOT GOOD; attack: "\n".repeat(100) + "." 
+bad16 = re.compile(r'''(.|\n)*!''') + +# GOOD +good8 = re.compile(r'''([\w.]+)*''') + +# NOT GOOD +bad17 = re.compile(r'''(a|aa?)*b''') + +# GOOD - not used as regexp +good9 = '(a|aa?)*b' + +# NOT GOOD +bad18 = re.compile(r'''(([\s\S]|[^a])*)"''') + +# GOOD - there is no witness in the end that could cause the regexp to not match +good10 = re.compile(r'''([^"']+)*''') + +# NOT GOOD +bad20 = re.compile(r'''((.|[^a])*)"''') + +# GOOD +good10 = re.compile(r'''((a|[^a])*)"''') + +# NOT GOOD +bad21 = re.compile(r'''((b|[^a])*)"''') + +# NOT GOOD +bad22 = re.compile(r'''((G|[^a])*)"''') + +# NOT GOOD +bad23 = re.compile(r'''(([0-9]|[^a])*)"''') + +# NOT GOOD +bad24 = re.compile(r'''(?:=(?:([!#\$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+)|"((?:\\[\x00-\x7f]|[^\x00-\x08\x0a-\x1f\x7f"])*)"))?''') + +# NOT GOOD +bad25 = re.compile(r'''"((?:\\[\x00-\x7f]|[^\x00-\x08\x0a-\x1f\x7f"])*)"''') + +# GOOD +bad26 = re.compile(r'''"((?:\\[\x00-\x7f]|[^\x00-\x08\x0a-\x1f\x7f"\\])*)"''') + +# NOT GOOD +bad27 = re.compile(r'''(([a-z]|[d-h])*)"''') + +# NOT GOOD +bad27 = re.compile(r'''(([^a-z]|[^0-9])*)"''') + +# NOT GOOD +bad28 = re.compile(r'''((\d|[0-9])*)"''') + +# NOT GOOD +bad29 = re.compile(r'''((\s|\s)*)"''') + +# NOT GOOD +bad30 = re.compile(r'''((\w|G)*)"''') + +# GOOD +good11 = re.compile(r'''((\s|\d)*)"''') + +# NOT GOOD +bad31 = re.compile(r'''((\d|\w)*)"''') + +# NOT GOOD +bad32 = re.compile(r'''((\d|5)*)"''') + +# NOT GOOD +bad33 = re.compile(r'''((\s|[\f])*)"''') + +# NOT GOOD +bad34 = re.compile(r'''((\s|[\v]|\\v)*)"''') + +# NOT GOOD +bad35 = re.compile(r'''((\f|[\f])*)"''') + +# NOT GOOD +bad36 = re.compile(r'''((\W|\D)*)"''') + +# NOT GOOD +bad37 = re.compile(r'''((\S|\w)*)"''') + +# NOT GOOD +bad38 = re.compile(r'''((\S|[\w])*)"''') + +# NOT GOOD +bad39 = re.compile(r'''((1s|[\da-z])*)"''') + +# NOT GOOD +bad40 = re.compile(r'''((0|[\d])*)"''') + +# NOT GOOD +bad41 = re.compile(r'''(([\d]+)*)"''') + +# GOOD - there is no witness in the end that could cause the regexp to not match +good12 = re.compile(r'''(\d+(X\d+)?)+''') + +# GOOD - there is no witness in the end that could cause the regexp to not match +good13 = re.compile(r'''([0-9]+(X[0-9]*)?)*''') + +# GOOD +good15 = re.compile(r'''^([^>]+)*(>|$)''') + +# NOT GOOD +bad43 = re.compile(r'''^([^>a]+)*(>|$)''') + +# NOT GOOD +bad44 = re.compile(r'''(\n\s*)+$''') + +# NOT GOOD +bad45 = re.compile(r'''^(?:\s+|#.*|\(\?#[^)]*\))*(?:[?*+]|{\d+(?:,\d*)?})''') + +# NOT GOOD +bad46 = re.compile(r'''\{\[\s*([a-zA-Z]+)\(([a-zA-Z]+)\)((\s*([a-zA-Z]+)\: ?([ a-zA-Z{}]+),?)+)*\s*\]\}''') + +# NOT GOOD +bad47 = re.compile(r'''(a+|b+|c+)*c''') + +# NOT GOOD +bad48 = re.compile(r'''(((a+a?)*)+b+)''') + +# NOT GOOD +bad49 = re.compile(r'''(a+)+bbbb''') + +# GOOD +good16 = re.compile(r'''(a+)+aaaaa*a+''') + +# NOT GOOD +bad50 = re.compile(r'''(a+)+aaaaa$''') + +# GOOD +good17 = re.compile(r'''(\n+)+\n\n''') + +# NOT GOOD +bad51 = re.compile(r'''(\n+)+\n\n$''') + +# NOT GOOD +bad52 = re.compile(r'''([^X]+)*$''') + +# NOT GOOD +bad53 = re.compile(r'''(([^X]b)+)*$''') + +# GOOD +good18 = re.compile(r'''(([^X]b)+)*($|[^X]b)''') + +# NOT GOOD +bad54 = re.compile(r'''(([^X]b)+)*($|[^X]c)''') + +# GOOD +good20 = re.compile(r'''((ab)+)*ababab''') + +# GOOD +good21 = re.compile(r'''((ab)+)*abab(ab)*(ab)+''') + +# GOOD +good22 = re.compile(r'''((ab)+)*''') + +# NOT GOOD +bad55 = re.compile(r'''((ab)+)*$''') + +# GOOD +good23 = re.compile(r'''((ab)+)*[a1][b1][a2][b2][a3][b3]''') + +# NOT GOOD +bad56 = re.compile(r'''([\n\s]+)*(.)''') + +# GOOD - any witness passes through the 
accept state. +good24 = re.compile(r'''(A*A*X)*''') + +# GOOD +good26 = re.compile(r'''([^\\\]]+)*''') + +# NOT GOOD +bad59 = re.compile(r'''(\w*foobarbaz\w*foobarbaz\w*foobarbaz\w*foobarbaz\s*foobarbaz\d*foobarbaz\w*)+-''') + +# NOT GOOD +bad60 = re.compile(r'''(.thisisagoddamnlongstringforstresstestingthequery|\sthisisagoddamnlongstringforstresstestingthequery)*-''') + +# NOT GOOD +bad61 = re.compile(r'''(thisisagoddamnlongstringforstresstestingthequery|this\w+query)*-''') + +# GOOD +good27 = re.compile(r'''(thisisagoddamnlongstringforstresstestingthequery|imanotherbutunrelatedstringcomparedtotheotherstring)*-''') + +# GOOD +good28 = re.compile(r'''foo([\uDC66\uDC67]|[\uDC68\uDC69])*foo''') + +# GOOD +good29 = re.compile(r'''foo((\uDC66|\uDC67)|(\uDC68|\uDC69))*foo''') + +# NOT GOOD (but cannot currently construct a prefix) +bad62 = re.compile(r'''a{2,3}(b+)+X''') + +# NOT GOOD (and a good prefix test) +bad63 = re.compile(r'''^<(\w+)((?:\s+\w+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?)*)\s*(\/?)>''') + +# GOOD +good30 = re.compile(r'''(a+)*[\s\S][\s\S][\s\S]?''') + +# GOOD - but we fail to see that repeating the attack string ends in the "accept any" state (due to not parsing the range `[\s\S]{2,3}`). +good31 = re.compile(r'''(a+)*[\s\S]{2,3}''') + +# GOOD - but we spuriously conclude that a rejecting suffix exists (due to not parsing the range `[\s\S]{2,}` when constructing the NFA). +good32 = re.compile(r'''(a+)*([\s\S]{2,}|X)$''') + +# GOOD +good33 = re.compile(r'''(a+)*([\s\S]*|X)$''') + +# NOT GOOD +bad64 = re.compile(r'''((a+)*$|[\s\S]+)''') + +# GOOD - but still flagged. The only change compared to the above is the order of alternatives, which we don't model. +good34 = re.compile(r'''([\s\S]+|(a+)*$)''') + +# GOOD +good35 = re.compile(r'''((;|^)a+)+$''') + +# NOT GOOD (a good prefix test) +bad65 = re.compile(r'''(^|;)(0|1)(0|1)(0|1)(0|1)(0|1)(0|1)(0|1)(0|1)(0|1)(0|1)(0|1)(0|1)(0|1)(0|1)(e+)+f''') + +# NOT GOOD +bad66 = re.compile(r'''^ab(c+)+$''') + +# NOT GOOD +bad67 = re.compile(r'''(\d(\s+)*){20}''') + +# GOOD - but we spuriously conclude that a rejecting suffix exists. +good36 = re.compile(r'''(([^/]|X)+)(\/[\s\S]*)*$''') + +# GOOD - but we spuriously conclude that a rejecting suffix exists. +good37 = re.compile(r'''^((x([^Y]+)?)*(Y|$))''') + +# NOT GOOD +bad68 = re.compile(r'''(a*)+b''') + +# NOT GOOD +bad69 = re.compile(r'''foo([\w-]*)+bar''') + +# NOT GOOD +bad70 = re.compile(r'''((ab)*)+c''') + +# NOT GOOD +bad71 = re.compile(r'''(a?a?)*b''') + +# GOOD +good38 = re.compile(r'''(a?)*b''') + +# NOT GOOD - but not detected +bad72 = re.compile(r'''(c?a?)*b''') + +# NOT GOOD +bad73 = re.compile(r'''(?:a|a?)+b''') + +# NOT GOOD - but not detected. 
+bad74 = re.compile(r'''(a?b?)*$''') + +# NOT GOOD +bad76 = re.compile(r'''PRE(([a-c]|[c-d])T(e?e?e?e?|X))+(cTcT|cTXcTX$)''') + +# NOT GOOD - but not detected +bad77 = re.compile(r'''^((a)+\w)+$''') + +# NOT GOOD +bad78 = re.compile(r'''^(b+.)+$''') + +# GOOD +good39 = re.compile(r'''a*b''') + +# All 4 bad combinations of nested * and + +bad79 = re.compile(r'''(a*)*b''') +bad80 = re.compile(r'''(a+)*b''') +bad81 = re.compile(r'''(a*)+b''') +bad82 = re.compile(r'''(a+)+b''') + +# GOOD +good40 = re.compile(r'''(a|b)+''') +good41 = re.compile(r'''(?:[\s;,"'<>(){}|[\]@=+*]|:(?![/\\]))+''') # parses wrongly, sees column 42 as a char set start + +# NOT GOOD +bad83 = re.compile(r'''^((?:a{|-)|\w\{)+X$''') +bad84 = re.compile(r'''^((?:a{0|-)|\w\{\d)+X$''') +bad85 = re.compile(r'''^((?:a{0,|-)|\w\{\d,)+X$''') +bad86 = re.compile(r'''^((?:a{0,2|-)|\w\{\d,\d)+X$''') + +# GOOD: +good42 = re.compile(r'''^((?:a{0,2}|-)|\w\{\d,\d\})+X$''') diff --git a/python/ql/test/query-tests/Security/CWE-730/unittests.py b/python/ql/test/query-tests/Security/CWE-730/unittests.py new file mode 100644 index 000000000000..7b69c10771fc --- /dev/null +++ b/python/ql/test/query-tests/Security/CWE-730/unittests.py @@ -0,0 +1,9 @@ +import re + +# Treatment of escapes +re.compile(r"X([^\.]|\.)*$") # No ReDoS. +re.compile(r"X(Æ|\Æ)+$") # Has ReDoS. + +# Treatment of line breaks +re.compile(r'(?:.|\n)*b') # No ReDoS. +re.compile(r'(?:.|\n)*b', re.DOTALL) # Has ReDoS. From c7992f6c6eb07ba7caa5bf5a8b65cde7ffcc9a3e Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Mon, 28 Jun 2021 17:24:37 +0200 Subject: [PATCH 09/22] Python: add change note --- python/change-notes/2021-07-28-port-RoDoS-queries.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 python/change-notes/2021-07-28-port-RoDoS-queries.md diff --git a/python/change-notes/2021-07-28-port-RoDoS-queries.md b/python/change-notes/2021-07-28-port-RoDoS-queries.md new file mode 100644 index 000000000000..a53a5780b34b --- /dev/null +++ b/python/change-notes/2021-07-28-port-RoDoS-queries.md @@ -0,0 +1,3 @@ +lgtm,codescanning +* Ported _Inefficient regular expression_ (`py/redos`) query from javascript. +* Ported _Polynomial regular expression used on uncontrolled data_ [`py/polynomial-redos`] query from javascript. 
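
To make concrete what the two queries in this change note look for: a pattern such as `(a+)+b` (present as `bad82` in the `redos.py` test file above) nests one quantifier inside another over the same characters, so when the final `b` is missing, Python's backtracking `re` engine tries exponentially many ways of splitting the run of `a`s before giving up. The standalone Python sketch below is illustrative only and not part of the patch; the variable names and loop bounds are arbitrary, and exact timings depend on the CPython version.

import re
import time

# `bad82` from redos.py: nested quantifiers over the same character.
pattern = re.compile(r'(a+)+b')

for n in range(12, 21, 2):
    attack = 'a' * n           # only 'a's and no 'b', so the match must fail
    t0 = time.perf_counter()
    pattern.match(attack)      # returns None, but only after exhaustive backtracking
    print(f"n={n:2d}: {time.perf_counter() - t0:.4f} s")

On CPython the reported time roughly doubles for each additional 'a', which is the exponential backtracking that `py/redos` reports for this family of patterns (see the `redos.py:352` to `redos.py:355` entries in `ReDoS.expected` above); `py/polynomial-redos` covers the analogous superlinear cases.
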
From 135b71b6492a0366cbb90fa3873fe35c1c6b873d Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Tue, 29 Jun 2021 11:01:33 +0200 Subject: [PATCH 10/22] Python: Apply performance fix by @hvitved --- python/ql/src/semmle/python/RegexTreeView.qll | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/python/ql/src/semmle/python/RegexTreeView.qll b/python/ql/src/semmle/python/RegexTreeView.qll index 30c6dc70f0d8..e1f9a58251a8 100644 --- a/python/ql/src/semmle/python/RegexTreeView.qll +++ b/python/ql/src/semmle/python/RegexTreeView.qll @@ -358,6 +358,11 @@ class RegExpSequence extends RegExpTerm, TRegExpSequence { override string getPrimaryQLClass() { result = "RegExpSequence" } } +pragma[nomagic] +private int seqChildEnd(Regex re, int start, int end, int i) { + result = seqChild(re, start, end, i).getEnd() +} + // moved out so we can use it in the charpred private RegExpTerm seqChild(Regex re, int start, int end, int i) { re.sequence(start, end) and @@ -372,7 +377,7 @@ private RegExpTerm seqChild(Regex re, int start, int end, int i) { or i > 0 and result.getRegex() = re and - exists(int itemStart | itemStart = seqChild(re, start, end, i - 1).getEnd() | + exists(int itemStart | itemStart = seqChildEnd(re, start, end, i - 1) | result.getStart() = itemStart and re.item(itemStart, result.getEnd()) ) @@ -440,7 +445,7 @@ class RegExpEscape extends RegExpNormalChar { or this.getUnescaped() = "t" and result = "\t" or - this.getUnescaped() = "f" and result = " " + this.getUnescaped() = "f" and result = "\f" or isUnicode() and result = getUnicode() From ffb8938e52572712eae3204c0074652b2ba7a417 Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Tue, 29 Jun 2021 11:06:17 +0200 Subject: [PATCH 11/22] Python: undo autoformat character mangling --- python/ql/src/semmle/python/RegexTreeView.qll | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/ql/src/semmle/python/RegexTreeView.qll b/python/ql/src/semmle/python/RegexTreeView.qll index e1f9a58251a8..e052a939af9e 100644 --- a/python/ql/src/semmle/python/RegexTreeView.qll +++ b/python/ql/src/semmle/python/RegexTreeView.qll @@ -445,7 +445,7 @@ class RegExpEscape extends RegExpNormalChar { or this.getUnescaped() = "t" and result = "\t" or - this.getUnescaped() = "f" and result = "\f" + this.getUnescaped() = "f" and result = " " or isUnicode() and result = getUnicode() From 6f2cdbf59e90c6c71d6acdbdcf78875920672217 Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Tue, 29 Jun 2021 11:14:27 +0200 Subject: [PATCH 12/22] Python: Give up on providing values for form feeds --- python/ql/src/semmle/python/RegexTreeView.qll | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/ql/src/semmle/python/RegexTreeView.qll b/python/ql/src/semmle/python/RegexTreeView.qll index e052a939af9e..bdf73f36ba0d 100644 --- a/python/ql/src/semmle/python/RegexTreeView.qll +++ b/python/ql/src/semmle/python/RegexTreeView.qll @@ -445,8 +445,9 @@ class RegExpEscape extends RegExpNormalChar { or this.getUnescaped() = "t" and result = "\t" or - this.getUnescaped() = "f" and result = " " - or + // TODO: Find a way to include a formfeed character + // this.getUnescaped() = "f" and result = " " + // or isUnicode() and result = getUnicode() } From fbfe41516242bb97b6276f5ecd5f64feed8ba965 Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Tue, 29 Jun 2021 11:18:24 +0200 Subject: [PATCH 13/22] Python: Limit test files --- python/ql/test/library-tests/regex/Alternation.ql | 4 +++- 
python/ql/test/library-tests/regex/GroupContents.ql | 4 +++- python/ql/test/library-tests/regex/Mode.ql | 1 + python/ql/test/library-tests/regex/Qualified.ql | 4 +++- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/python/ql/test/library-tests/regex/Alternation.ql b/python/ql/test/library-tests/regex/Alternation.ql index b369f822d4ab..d172e7789433 100644 --- a/python/ql/test/library-tests/regex/Alternation.ql +++ b/python/ql/test/library-tests/regex/Alternation.ql @@ -2,6 +2,8 @@ import python import semmle.python.regex from Regex r, int start, int end, int part_start, int part_end -where r.alternationOption(start, end, part_start, part_end) +where + r.getLocation().getFile().getBaseName() = "test.py" and + r.alternationOption(start, end, part_start, part_end) select r.getText(), start, end, r.getText().substring(start, end), part_start, part_end, r.getText().substring(part_start, part_end) diff --git a/python/ql/test/library-tests/regex/GroupContents.ql b/python/ql/test/library-tests/regex/GroupContents.ql index 4fd7d9d229ef..221edbe44ba8 100644 --- a/python/ql/test/library-tests/regex/GroupContents.ql +++ b/python/ql/test/library-tests/regex/GroupContents.ql @@ -2,6 +2,8 @@ import python import semmle.python.regex from Regex r, int start, int end, int part_start, int part_end -where r.groupContents(start, end, part_start, part_end) +where + r.getLocation().getFile().getBaseName() = "test.py" and + r.groupContents(start, end, part_start, part_end) select r.getText(), start, end, r.getText().substring(start, end), part_start, part_end, r.getText().substring(part_start, part_end) diff --git a/python/ql/test/library-tests/regex/Mode.ql b/python/ql/test/library-tests/regex/Mode.ql index 02e84f86c5db..62449fbb3302 100644 --- a/python/ql/test/library-tests/regex/Mode.ql +++ b/python/ql/test/library-tests/regex/Mode.ql @@ -2,4 +2,5 @@ import python import semmle.python.regex from Regex r +where r.getLocation().getFile().getBaseName() = "test.py" select r.getLocation().getStartLine(), r.getAMode() diff --git a/python/ql/test/library-tests/regex/Qualified.ql b/python/ql/test/library-tests/regex/Qualified.ql index ee7a4b82bcf0..26400b3440f3 100644 --- a/python/ql/test/library-tests/regex/Qualified.ql +++ b/python/ql/test/library-tests/regex/Qualified.ql @@ -2,5 +2,7 @@ import python import semmle.python.regex from Regex r, int start, int end, boolean maybe_empty, boolean may_repeat_forever -where r.qualifiedItem(start, end, maybe_empty, may_repeat_forever) +where + r.getLocation().getFile().getBaseName() = "test.py" and + r.qualifiedItem(start, end, maybe_empty, may_repeat_forever) select r.getText(), start, end, maybe_empty, may_repeat_forever From e778a654641a3ca32da068602fb2569948326098 Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Tue, 29 Jun 2021 11:29:42 +0200 Subject: [PATCH 14/22] Python: Adjust test expectations so we can see the light go green. But we should perhaps do something about those duplicate results. 
--- .../query-tests/Expressions/Regex/BackspaceEscape.expected | 2 ++ .../Expressions/Regex/DuplicateCharacterInSet.expected | 3 +++ .../Expressions/Regex/MissingPartSpecialGroup.expected | 2 ++ .../query-tests/Expressions/Regex/UnmatchableCaret.expected | 4 ++++ .../query-tests/Expressions/Regex/UnmatchableDollar.expected | 4 ++++ 5 files changed, 15 insertions(+) diff --git a/python/ql/test/query-tests/Expressions/Regex/BackspaceEscape.expected b/python/ql/test/query-tests/Expressions/Regex/BackspaceEscape.expected index 1f9bf778bd3e..bb4ffe1e0701 100644 --- a/python/ql/test/query-tests/Expressions/Regex/BackspaceEscape.expected +++ b/python/ql/test/query-tests/Expressions/Regex/BackspaceEscape.expected @@ -1,2 +1,4 @@ | test.py:17:12:17:22 | Str | Backspace escape in regular expression at offset 1. | +| test.py:17:12:17:22 | [\\b\\t ] | Backspace escape in regular expression at offset 1. | +| test.py:19:12:19:28 | E\\d+\\b[ \\b\\t] | Backspace escape in regular expression at offset 8. | | test.py:19:12:19:28 | Str | Backspace escape in regular expression at offset 8. | diff --git a/python/ql/test/query-tests/Expressions/Regex/DuplicateCharacterInSet.expected b/python/ql/test/query-tests/Expressions/Regex/DuplicateCharacterInSet.expected index b00619280698..2e5f52beda8a 100644 --- a/python/ql/test/query-tests/Expressions/Regex/DuplicateCharacterInSet.expected +++ b/python/ql/test/query-tests/Expressions/Regex/DuplicateCharacterInSet.expected @@ -1,3 +1,6 @@ | test.py:46:12:46:18 | Str | This regular expression includes duplicate character 'A' in a set of characters. | +| test.py:46:12:46:18 | [AA] | This regular expression includes duplicate character 'A' in a set of characters. | | test.py:47:12:47:19 | Str | This regular expression includes duplicate character '0' in a set of characters. | +| test.py:47:12:47:19 | [000] | This regular expression includes duplicate character '0' in a set of characters. | | test.py:48:12:48:21 | Str | This regular expression includes duplicate character '-' in a set of characters. | +| test.py:48:12:48:21 | [-0-9-] | This regular expression includes duplicate character '-' in a set of characters. | diff --git a/python/ql/test/query-tests/Expressions/Regex/MissingPartSpecialGroup.expected b/python/ql/test/query-tests/Expressions/Regex/MissingPartSpecialGroup.expected index e7bbad23899e..138c5bd06dcc 100644 --- a/python/ql/test/query-tests/Expressions/Regex/MissingPartSpecialGroup.expected +++ b/python/ql/test/query-tests/Expressions/Regex/MissingPartSpecialGroup.expected @@ -1,2 +1,4 @@ +| test.py:22:12:22:29 | (P[\\w]+) | Regular expression is missing '?' in named group. | | test.py:22:12:22:29 | Str | Regular expression is missing '?' in named group. | +| test.py:23:12:23:33 | (_(P[\\w]+)\|) | Regular expression is missing '?' in named group. | | test.py:23:12:23:33 | Str | Regular expression is missing '?' in named group. | diff --git a/python/ql/test/query-tests/Expressions/Regex/UnmatchableCaret.expected b/python/ql/test/query-tests/Expressions/Regex/UnmatchableCaret.expected index 8b9f409ad848..7063dd1700c7 100644 --- a/python/ql/test/query-tests/Expressions/Regex/UnmatchableCaret.expected +++ b/python/ql/test/query-tests/Expressions/Regex/UnmatchableCaret.expected @@ -1,4 +1,8 @@ +| test.py:4:12:4:19 | ^abc | This regular expression includes an unmatchable caret at offset 1. | | test.py:4:12:4:19 | Str | This regular expression includes an unmatchable caret at offset 1. 
| +| test.py:5:12:5:23 | (?s) ^abc | This regular expression includes an unmatchable caret at offset 5. | | test.py:5:12:5:23 | Str | This regular expression includes an unmatchable caret at offset 5. | | test.py:6:12:6:21 | Str | This regular expression includes an unmatchable caret at offset 2. | +| test.py:6:12:6:21 | \\[^123] | This regular expression includes an unmatchable caret at offset 2. | +| test.py:79:12:79:27 | (?<=foo)^\\w+ | This regular expression includes an unmatchable caret at offset 8. | | test.py:79:12:79:27 | Str | This regular expression includes an unmatchable caret at offset 8. | diff --git a/python/ql/test/query-tests/Expressions/Regex/UnmatchableDollar.expected b/python/ql/test/query-tests/Expressions/Regex/UnmatchableDollar.expected index f0f93436ce97..b3c44e98bb51 100644 --- a/python/ql/test/query-tests/Expressions/Regex/UnmatchableDollar.expected +++ b/python/ql/test/query-tests/Expressions/Regex/UnmatchableDollar.expected @@ -1,4 +1,8 @@ | test.py:29:12:29:19 | Str | This regular expression includes an unmatchable dollar at offset 3. | +| test.py:29:12:29:19 | abc$ | This regular expression includes an unmatchable dollar at offset 3. | | test.py:30:12:30:23 | Str | This regular expression includes an unmatchable dollar at offset 3. | +| test.py:30:12:30:23 | abc$ (?s) | This regular expression includes an unmatchable dollar at offset 3. | | test.py:31:12:31:20 | Str | This regular expression includes an unmatchable dollar at offset 2. | +| test.py:31:12:31:20 | \\[$] | This regular expression includes an unmatchable dollar at offset 2. | | test.py:80:12:80:26 | Str | This regular expression includes an unmatchable dollar at offset 3. | +| test.py:80:12:80:26 | \\w+$(?=foo) | This regular expression includes an unmatchable dollar at offset 3. | From c19522e921b3f987a2d1c1d1778ffaeecaa4dfc9 Mon Sep 17 00:00:00 2001 From: yoff Date: Wed, 30 Jun 2021 11:49:45 +0200 Subject: [PATCH 15/22] Apply suggestions from code review Co-authored-by: Rasmus Wriedt Larsen --- .../2021-07-28-port-RoDoS-queries.md | 4 +- python/ql/src/semmle/python/regex.qll | 51 +++++++++++++------ .../test/library-tests/regex/charRangeTest.py | 17 ++++++- .../test/library-tests/regex/charSetTest.py | 14 ++++- 4 files changed, 65 insertions(+), 21 deletions(-) diff --git a/python/change-notes/2021-07-28-port-RoDoS-queries.md b/python/change-notes/2021-07-28-port-RoDoS-queries.md index a53a5780b34b..9d628ad4a28b 100644 --- a/python/change-notes/2021-07-28-port-RoDoS-queries.md +++ b/python/change-notes/2021-07-28-port-RoDoS-queries.md @@ -1,3 +1,3 @@ lgtm,codescanning -* Ported _Inefficient regular expression_ (`py/redos`) query from javascript. -* Ported _Polynomial regular expression used on uncontrolled data_ [`py/polynomial-redos`] query from javascript. +* Added _Inefficient regular expression_ (`py/redos`) query, which is already available in JavaScript. +* Added _Polynomial regular expression used on uncontrolled data_ (`py/polynomial-redos`), which is already available in JavaScript. diff --git a/python/ql/src/semmle/python/regex.qll b/python/ql/src/semmle/python/regex.qll index 9aae8ded6a4d..a251bab5f4f8 100644 --- a/python/ql/src/semmle/python/regex.qll +++ b/python/ql/src/semmle/python/regex.qll @@ -130,36 +130,57 @@ abstract class RegexString extends Expr { /** result is true for those start chars that actually mark a start of a char set. 
*/ boolean char_set_start(int pos) { exists(int index | - char_set_delimiter(index, pos) = true and + // is opening bracket + this.char_set_delimiter(index, pos) = true and ( - index = 1 and result = true // if a '[' is first in the string (among brackets), it starts a char set + // if this is the first bracket, `pos` starts a char set + index = 1 and result = true or + // if the previous char set delimiter was not a closing bracket, `pos` does + // not start a char set. This is needed to handle cases such as `[[]` (a + // char set that matches the `[` char) index > 1 and - not char_set_delimiter(index - 1, _) = false and + not this.char_set_delimiter(index - 1, _) = false and result = false or - exists(int p1 | - char_set_delimiter(index - 1, p1) = false and // if it is preceded by a closing bracket, it starts a char set + // special handling of cases such as `[][]` (the character-set of the characters `]` and `[`). + exists(int prev_closing_bracket_pos | + // previous bracket is a closing bracket + this.char_set_delimiter(index - 1, prev_closing_bracket_pos) = false and if - exists(int p2 | - p1 = p2 + 1 - or - this.getChar(p2 + 1) = "^" and - p1 = p2 + 2 + // check if the character that comes before the previous closing bracket + // is an opening bracket (taking `^` into account) + exists(int pos_before_prev_closing_bracket | + if this.getChar(prev_closing_bracket_pos - 1) = "^" + then pos_before_prev_closing_bracket = prev_closing_bracket_pos - 2 + else pos_before_prev_closing_bracket = prev_closing_bracket_pos - 1 | - char_set_delimiter(index - 2, p2) = true // but the closing bracket only closes... + this.char_set_delimiter(index - 2, pos_before_prev_closing_bracket) = true ) then - exists(int p2 | char_set_delimiter(index - 2, p2) = true | - result = char_set_start(p2).booleanNot() // ...if it is not the first in a char set + // brackets without anything in between is not valid character ranges, so + // the first closing bracket in `[]]` and `[^]]` does not count, + // + // and we should _not_ mark the second opening bracket in `[][]` and `[^][]` + // as starting a new char set. ^ ^ + exists(int pos_before_prev_closing_bracket | + this.char_set_delimiter(index - 2, pos_before_prev_closing_bracket) = true + | + result = this.char_set_start(pos_before_prev_closing_bracket).booleanNot() ) - else result = true + else + // if not, `pos` does in fact mark a real start of a character range + result = true ) ) ) } - /** result denotes if the index is a left bracket */ + /** + * Helper predicate for chars that could be character-set delimiters. + * Holds if the (non-escaped) char at `pos` in the string, is the (one-based) `index` occurrence of a bracket (`[` or `]`) in the string. + * Result if `true` is the char is `[`, and `false` if the char is `]`. 
+ */ boolean char_set_delimiter(int index, int pos) { pos = rank[index](int p | this.nonEscapedCharAt(p) = "[" or this.nonEscapedCharAt(p) = "]") and ( diff --git a/python/ql/test/library-tests/regex/charRangeTest.py b/python/ql/test/library-tests/regex/charRangeTest.py index 692c5d9a83e6..de9de7ccaf39 100644 --- a/python/ql/test/library-tests/regex/charRangeTest.py +++ b/python/ql/test/library-tests/regex/charRangeTest.py @@ -1,13 +1,26 @@ import re -re.compile(r'[]-[]') #$ charRange=1:2-3:4 + +re.compile(r'[A-Z]') #$ charRange=1:2-3:4 + +try: + re.compile(r'[]-[]') #$ SPURIOUS: charRange=1:2-3:4 + raise Exception("this should not be reached") +except re.error: + pass + re.compile(r'[---]') #$ charRange=1:2-3:4 re.compile(r'[\---]') #$ charRange=1:3-4:5 re.compile(r'[--\-]') #$ charRange=1:2-3:5 re.compile(r'[\--\-]') #$ charRange=1:3-4:6 re.compile(r'[0-9-A-Z]') #$ charRange=1:2-3:4 charRange=5:6-7:8 re.compile(r'[0\-9-A-Z]') #$ charRange=4:5-6:7 -re.compile(r'[0--9-A-Z]') #$ charRange=1:2-3:4 charRange=4:5-6:7 + +try: + re.compile(r'[0--9-A-Z]') #$ SPURIOUS: charRange=1:2-3:4 charRange=4:5-6:7 + raise Exception("this should not be reached") +except re.error: + pass re.compile(r'[^A-Z]') #$ charRange=2:3-4:5 diff --git a/python/ql/test/library-tests/regex/charSetTest.py b/python/ql/test/library-tests/regex/charSetTest.py index 097f0c40347d..6649e5413610 100644 --- a/python/ql/test/library-tests/regex/charSetTest.py +++ b/python/ql/test/library-tests/regex/charSetTest.py @@ -10,8 +10,18 @@ re.compile("[[]]") #$ charSet=0:3 re.compile("[^]]") #$ charSet=0:4 re.compile("[^-]") #$ charSet=0:4 -re.compile("[]-[]") #$ charSet=0:5 -re.compile("[^]-[]") #$ charSet=0:6 + +try: + re.compile("[]-[]") #$ SPURIOUS: charSet=0:5 + raise Exception("this should not be reached") +except re.error: + pass + +try: + re.compile("[^]-[]") #$ SPURIOUS: charSet=0:6 + raise Exception("this should not be reached") +except re.error: + pass re.compile("]]][[[[]") #$ charSet=3:8 From 6dfbf804949640346d714d5c85ac8e900ef24546 Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Wed, 30 Jun 2021 12:21:52 +0200 Subject: [PATCH 16/22] Python: Disable use of `toUnicode` until supporting CLI is released --- python/ql/src/semmle/python/RegexTreeView.qll | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/python/ql/src/semmle/python/RegexTreeView.qll b/python/ql/src/semmle/python/RegexTreeView.qll index bdf73f36ba0d..cb819e7f9c19 100644 --- a/python/ql/src/semmle/python/RegexTreeView.qll +++ b/python/ql/src/semmle/python/RegexTreeView.qll @@ -473,9 +473,11 @@ class RegExpEscape extends RegExpNormalChar { * E.g. for `\u0061` this returns "a". */ private string getUnicode() { - exists(int codepoint | codepoint = sum(getHexValueFromUnicode(_)) | - result = codepoint.toUnicode() - ) + // TODO: Enable this once a supporting CLI is released. 
+ // exists(int codepoint | codepoint = sum(getHexValueFromUnicode(_)) | + // result = codepoint.toUnicode() + // ) + none() } /** From 09e71cfdfd098491df71c2bf75cde638fa8eb407 Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Wed, 30 Jun 2021 12:25:29 +0200 Subject: [PATCH 17/22] Python: update test expectations --- .../Security/CWE-020/IncompleteHostnameRegExp.expected | 1 + python/ql/test/query-tests/Security/CWE-730/ReDoS.expected | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/python/ql/test/query-tests/Security/CWE-020/IncompleteHostnameRegExp.expected b/python/ql/test/query-tests/Security/CWE-020/IncompleteHostnameRegExp.expected index 00e0fcb95a0e..60c33c0e6531 100644 --- a/python/ql/test/query-tests/Security/CWE-020/IncompleteHostnameRegExp.expected +++ b/python/ql/test/query-tests/Security/CWE-020/IncompleteHostnameRegExp.expected @@ -1 +1,2 @@ +| hosttest.py:6:27:6:51 | (www\|beta).example.com/ | This regular expression has an unescaped '.' before 'example.com', so it might match more hosts than expected. | | hosttest.py:6:27:6:51 | Str | This regular expression has an unescaped '.' before 'example.com', so it might match more hosts than expected. | diff --git a/python/ql/test/query-tests/Security/CWE-730/ReDoS.expected b/python/ql/test/query-tests/Security/CWE-730/ReDoS.expected index 1286abcc0a70..5753edca788b 100644 --- a/python/ql/test/query-tests/Security/CWE-730/ReDoS.expected +++ b/python/ql/test/query-tests/Security/CWE-730/ReDoS.expected @@ -35,8 +35,6 @@ | redos.py:139:25:139:31 | (\\w\|G)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'G'. | | redos.py:145:25:145:32 | (\\d\|\\w)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. | | redos.py:148:25:148:31 | (\\d\|5)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '5'. | -| redos.py:151:25:151:34 | (\\s\|[\\f])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\u000c'. | -| redos.py:157:25:157:34 | (\\f\|[\\f])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\u000c'. | | redos.py:160:25:160:32 | (\\W\|\\D)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' '. | | redos.py:163:25:163:32 | (\\S\|\\w)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. | | redos.py:166:25:166:34 | (\\S\|[\\w])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. 
| From 72986e1e283296016dfdc20aebf05cf648f2e9dd Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Wed, 30 Jun 2021 12:50:36 +0200 Subject: [PATCH 18/22] Python: Add some comments on the booelan sweep pattern --- python/ql/src/semmle/python/regex.qll | 32 +++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/python/ql/src/semmle/python/regex.qll b/python/ql/src/semmle/python/regex.qll index a251bab5f4f8..e73083210c27 100644 --- a/python/ql/src/semmle/python/regex.qll +++ b/python/ql/src/semmle/python/regex.qll @@ -127,7 +127,17 @@ abstract class RegexString extends Expr { result = this.(Unicode).getText() } - /** result is true for those start chars that actually mark a start of a char set. */ + /** + * Helper predicate for `char_set_start(int start, int end)`. + * + * In order to identify left brackets ('[') which actually start a character class, + * we perform a left to right scan of the string. + * + * To avoid negative recursion we return a boolean. See `escaping`, + * the helper for `escapingChar`, for a clean use of this pattern. + * + * result is true for those start chars that actually mark a start of a char set. + */ boolean char_set_start(int pos) { exists(int index | // is opening bracket @@ -176,9 +186,9 @@ abstract class RegexString extends Expr { ) } - /** - * Helper predicate for chars that could be character-set delimiters. - * Holds if the (non-escaped) char at `pos` in the string, is the (one-based) `index` occurrence of a bracket (`[` or `]`) in the string. + /** + * Helper predicate for chars that could be character-set delimiters. + * Holds if the (non-escaped) char at `pos` in the string, is the (one-based) `index` occurrence of a bracket (`[` or `]`) in the string. * Result if `true` is the char is `[`, and `false` if the char is `]`. */ boolean char_set_delimiter(int index, int pos) { @@ -267,6 +277,13 @@ abstract class RegexString extends Expr { ) } + /** + * Helper predicate for `charRange`. + * We can determine where character ranges end by a left to right sweep. + * + * To avoid negative recursion we return a boolean. See `escaping`, + * the helper for `escapingChar`, for a clean use of this pattern. + */ private boolean charRangeEnd(int charset_start, int index) { this.char_set_token(charset_start, index, _, _) and ( @@ -290,8 +307,15 @@ abstract class RegexString extends Expr { ) } + /** Holds if the character at `pos` is a "\" that is actually escaping what comes after. */ predicate escapingChar(int pos) { this.escaping(pos) = true } + /** + * Helper predicate for `escapingChar`. + * In order to avoid negative recusrion, we return a boolean. + * This way, we can refer to `escaping(pos - 1).booleanNot()` + * rather than to a negated version of `escaping(pos)`. 
+ */ private boolean escaping(int pos) { pos = -1 and result = false or From 651f8abba0fed4ece72bce97f7fd6210269f1894 Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Wed, 30 Jun 2021 14:39:49 +0200 Subject: [PATCH 19/22] Python: Avoid multiple results for `toString` --- python/ql/src/semmle/python/regex.qll | 6 ------ python/ql/test/library-tests/regex/SubstructureTests.ql | 8 ++++---- .../Expressions/Regex/BackspaceEscape.expected | 2 -- .../Expressions/Regex/DuplicateCharacterInSet.expected | 3 --- .../Expressions/Regex/MissingPartSpecialGroup.expected | 2 -- .../Expressions/Regex/UnmatchableCaret.expected | 4 ---- .../Expressions/Regex/UnmatchableDollar.expected | 4 ---- .../Security/CWE-020/IncompleteHostnameRegExp.expected | 1 - 8 files changed, 4 insertions(+), 26 deletions(-) diff --git a/python/ql/src/semmle/python/regex.qll b/python/ql/src/semmle/python/regex.qll index e73083210c27..e62e563ccf0d 100644 --- a/python/ql/src/semmle/python/regex.qll +++ b/python/ql/src/semmle/python/regex.qll @@ -121,12 +121,6 @@ deprecated string mode_from_mode_object(Value obj) { abstract class RegexString extends Expr { RegexString() { (this instanceof Bytes or this instanceof Unicode) } - override string toString() { - result = this.(Bytes).getText() - or - result = this.(Unicode).getText() - } - /** * Helper predicate for `char_set_start(int start, int end)`. * diff --git a/python/ql/test/library-tests/regex/SubstructureTests.ql b/python/ql/test/library-tests/regex/SubstructureTests.ql index f92f051ba783..cba3e56d08c3 100644 --- a/python/ql/test/library-tests/regex/SubstructureTests.ql +++ b/python/ql/test/library-tests/regex/SubstructureTests.ql @@ -13,7 +13,7 @@ class CharacterSetTest extends InlineExpectationsTest { exists(Regex re, int start, int end | re.charSet(start, end) and location = re.getLocation() and - element = re.toString().substring(start, end) and + element = re.getText().substring(start, end) and value = start + ":" + end and tag = "charSet" ) @@ -31,7 +31,7 @@ class CharacterRangeTest extends InlineExpectationsTest { exists(Regex re, int start, int lower_end, int upper_start, int end | re.charRange(_, start, lower_end, upper_start, end) and location = re.getLocation() and - element = re.toString().substring(start, end) and + element = re.getText().substring(start, end) and value = start + ":" + lower_end + "-" + upper_start + ":" + end and tag = "charRange" ) @@ -49,7 +49,7 @@ class EscapeTest extends InlineExpectationsTest { exists(Regex re, int start, int end | re.escapedCharacter(start, end) and location = re.getLocation() and - element = re.toString().substring(start, end) and + element = re.getText().substring(start, end) and value = start + ":" + end and tag = "escapedCharacter" ) @@ -67,7 +67,7 @@ class GroupTest extends InlineExpectationsTest { exists(Regex re, int start, int end | re.group(start, end) and location = re.getLocation() and - element = re.toString().substring(start, end) and + element = re.getText().substring(start, end) and value = start + ":" + end and tag = "group" ) diff --git a/python/ql/test/query-tests/Expressions/Regex/BackspaceEscape.expected b/python/ql/test/query-tests/Expressions/Regex/BackspaceEscape.expected index bb4ffe1e0701..1f9bf778bd3e 100644 --- a/python/ql/test/query-tests/Expressions/Regex/BackspaceEscape.expected +++ b/python/ql/test/query-tests/Expressions/Regex/BackspaceEscape.expected @@ -1,4 +1,2 @@ | test.py:17:12:17:22 | Str | Backspace escape in regular expression at offset 1. 
| -| test.py:17:12:17:22 | [\\b\\t ] | Backspace escape in regular expression at offset 1. | -| test.py:19:12:19:28 | E\\d+\\b[ \\b\\t] | Backspace escape in regular expression at offset 8. | | test.py:19:12:19:28 | Str | Backspace escape in regular expression at offset 8. | diff --git a/python/ql/test/query-tests/Expressions/Regex/DuplicateCharacterInSet.expected b/python/ql/test/query-tests/Expressions/Regex/DuplicateCharacterInSet.expected index 2e5f52beda8a..b00619280698 100644 --- a/python/ql/test/query-tests/Expressions/Regex/DuplicateCharacterInSet.expected +++ b/python/ql/test/query-tests/Expressions/Regex/DuplicateCharacterInSet.expected @@ -1,6 +1,3 @@ | test.py:46:12:46:18 | Str | This regular expression includes duplicate character 'A' in a set of characters. | -| test.py:46:12:46:18 | [AA] | This regular expression includes duplicate character 'A' in a set of characters. | | test.py:47:12:47:19 | Str | This regular expression includes duplicate character '0' in a set of characters. | -| test.py:47:12:47:19 | [000] | This regular expression includes duplicate character '0' in a set of characters. | | test.py:48:12:48:21 | Str | This regular expression includes duplicate character '-' in a set of characters. | -| test.py:48:12:48:21 | [-0-9-] | This regular expression includes duplicate character '-' in a set of characters. | diff --git a/python/ql/test/query-tests/Expressions/Regex/MissingPartSpecialGroup.expected b/python/ql/test/query-tests/Expressions/Regex/MissingPartSpecialGroup.expected index 138c5bd06dcc..e7bbad23899e 100644 --- a/python/ql/test/query-tests/Expressions/Regex/MissingPartSpecialGroup.expected +++ b/python/ql/test/query-tests/Expressions/Regex/MissingPartSpecialGroup.expected @@ -1,4 +1,2 @@ -| test.py:22:12:22:29 | (P[\\w]+) | Regular expression is missing '?' in named group. | | test.py:22:12:22:29 | Str | Regular expression is missing '?' in named group. | -| test.py:23:12:23:33 | (_(P[\\w]+)\|) | Regular expression is missing '?' in named group. | | test.py:23:12:23:33 | Str | Regular expression is missing '?' in named group. | diff --git a/python/ql/test/query-tests/Expressions/Regex/UnmatchableCaret.expected b/python/ql/test/query-tests/Expressions/Regex/UnmatchableCaret.expected index 7063dd1700c7..8b9f409ad848 100644 --- a/python/ql/test/query-tests/Expressions/Regex/UnmatchableCaret.expected +++ b/python/ql/test/query-tests/Expressions/Regex/UnmatchableCaret.expected @@ -1,8 +1,4 @@ -| test.py:4:12:4:19 | ^abc | This regular expression includes an unmatchable caret at offset 1. | | test.py:4:12:4:19 | Str | This regular expression includes an unmatchable caret at offset 1. | -| test.py:5:12:5:23 | (?s) ^abc | This regular expression includes an unmatchable caret at offset 5. | | test.py:5:12:5:23 | Str | This regular expression includes an unmatchable caret at offset 5. | | test.py:6:12:6:21 | Str | This regular expression includes an unmatchable caret at offset 2. | -| test.py:6:12:6:21 | \\[^123] | This regular expression includes an unmatchable caret at offset 2. | -| test.py:79:12:79:27 | (?<=foo)^\\w+ | This regular expression includes an unmatchable caret at offset 8. | | test.py:79:12:79:27 | Str | This regular expression includes an unmatchable caret at offset 8. 
| diff --git a/python/ql/test/query-tests/Expressions/Regex/UnmatchableDollar.expected b/python/ql/test/query-tests/Expressions/Regex/UnmatchableDollar.expected index b3c44e98bb51..f0f93436ce97 100644 --- a/python/ql/test/query-tests/Expressions/Regex/UnmatchableDollar.expected +++ b/python/ql/test/query-tests/Expressions/Regex/UnmatchableDollar.expected @@ -1,8 +1,4 @@ | test.py:29:12:29:19 | Str | This regular expression includes an unmatchable dollar at offset 3. | -| test.py:29:12:29:19 | abc$ | This regular expression includes an unmatchable dollar at offset 3. | | test.py:30:12:30:23 | Str | This regular expression includes an unmatchable dollar at offset 3. | -| test.py:30:12:30:23 | abc$ (?s) | This regular expression includes an unmatchable dollar at offset 3. | | test.py:31:12:31:20 | Str | This regular expression includes an unmatchable dollar at offset 2. | -| test.py:31:12:31:20 | \\[$] | This regular expression includes an unmatchable dollar at offset 2. | | test.py:80:12:80:26 | Str | This regular expression includes an unmatchable dollar at offset 3. | -| test.py:80:12:80:26 | \\w+$(?=foo) | This regular expression includes an unmatchable dollar at offset 3. | diff --git a/python/ql/test/query-tests/Security/CWE-020/IncompleteHostnameRegExp.expected b/python/ql/test/query-tests/Security/CWE-020/IncompleteHostnameRegExp.expected index 60c33c0e6531..00e0fcb95a0e 100644 --- a/python/ql/test/query-tests/Security/CWE-020/IncompleteHostnameRegExp.expected +++ b/python/ql/test/query-tests/Security/CWE-020/IncompleteHostnameRegExp.expected @@ -1,2 +1 @@ -| hosttest.py:6:27:6:51 | (www\|beta).example.com/ | This regular expression has an unescaped '.' before 'example.com', so it might match more hosts than expected. | | hosttest.py:6:27:6:51 | Str | This regular expression has an unescaped '.' before 'example.com', so it might match more hosts than expected. 
| From c306cee04e00c10ba37bcddaf1d443166e432089 Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Wed, 30 Jun 2021 15:03:22 +0200 Subject: [PATCH 20/22] Python: mimic JS file hierarchy --- config/identical-files.json | 6 +++--- python/ql/src/Security/CWE-730/PolynomialBackTracking.ql | 2 +- python/ql/src/Security/CWE-730/PolynomialReDoS.ql | 2 +- python/ql/src/Security/CWE-730/ReDoS.ql | 2 +- .../performance}/ExponentialBackTracking.qll | 0 .../python/{regex => security/performance}/ReDoSUtil.qll | 0 .../{regex => security/performance}/RegExpTreeView.qll | 0 .../performance}/SuperlinearBackTracking.qll | 0 8 files changed, 6 insertions(+), 6 deletions(-) rename python/ql/src/semmle/python/{regex => security/performance}/ExponentialBackTracking.qll (100%) rename python/ql/src/semmle/python/{regex => security/performance}/ReDoSUtil.qll (100%) rename python/ql/src/semmle/python/{regex => security/performance}/RegExpTreeView.qll (100%) rename python/ql/src/semmle/python/{regex => security/performance}/SuperlinearBackTracking.qll (100%) diff --git a/config/identical-files.json b/config/identical-files.json index b3be90b75b22..e12be2c91c7c 100644 --- a/config/identical-files.json +++ b/config/identical-files.json @@ -451,14 +451,14 @@ ], "ReDoS Util Python/JS": [ "javascript/ql/src/semmle/javascript/security/performance/ReDoSUtil.qll", - "python/ql/src/semmle/python/regex/ReDoSUtil.qll" + "python/ql/src/semmle/python/security/performance/ReDoSUtil.qll" ], "ReDoS Exponential Python/JS": [ "javascript/ql/src/semmle/javascript/security/performance/ExponentialBackTracking.qll", - "python/ql/src/semmle/python/regex/ExponentialBackTracking.qll" + "python/ql/src/semmle/python/security/performance/ExponentialBackTracking.qll" ], "ReDoS Polynomial Python/JS": [ "javascript/ql/src/semmle/javascript/security/performance/SuperlinearBackTracking.qll", - "python/ql/src/semmle/python/regex/SuperlinearBackTracking.qll" + "python/ql/src/semmle/python/security/performance/SuperlinearBackTracking.qll" ] } diff --git a/python/ql/src/Security/CWE-730/PolynomialBackTracking.ql b/python/ql/src/Security/CWE-730/PolynomialBackTracking.ql index a98d4eefa7ec..628aca579552 100644 --- a/python/ql/src/Security/CWE-730/PolynomialBackTracking.ql +++ b/python/ql/src/Security/CWE-730/PolynomialBackTracking.ql @@ -1,5 +1,5 @@ import python -import semmle.python.regex.SuperlinearBackTracking +import semmle.python.security.performance.SuperlinearBackTracking from PolynomialBackTrackingTerm t where t.getLocation().getFile().getBaseName() = "KnownCVEs.py" diff --git a/python/ql/src/Security/CWE-730/PolynomialReDoS.ql b/python/ql/src/Security/CWE-730/PolynomialReDoS.ql index b948c1601a85..0d92ab2d3f56 100644 --- a/python/ql/src/Security/CWE-730/PolynomialReDoS.ql +++ b/python/ql/src/Security/CWE-730/PolynomialReDoS.ql @@ -12,7 +12,7 @@ */ import python -import semmle.python.regex.SuperlinearBackTracking +import semmle.python.security.performance.SuperlinearBackTracking import semmle.python.security.dataflow.PolynomialReDoS import DataFlow::PathGraph diff --git a/python/ql/src/Security/CWE-730/ReDoS.ql b/python/ql/src/Security/CWE-730/ReDoS.ql index aebc2f81cffa..e44699d7be88 100644 --- a/python/ql/src/Security/CWE-730/ReDoS.ql +++ b/python/ql/src/Security/CWE-730/ReDoS.ql @@ -13,7 +13,7 @@ */ import python -import semmle.python.regex.ExponentialBackTracking +import semmle.python.security.performance.ExponentialBackTracking from RegExpTerm t, string pump, State s, string prefixMsg where diff --git 
a/python/ql/src/semmle/python/regex/ExponentialBackTracking.qll b/python/ql/src/semmle/python/security/performance/ExponentialBackTracking.qll similarity index 100% rename from python/ql/src/semmle/python/regex/ExponentialBackTracking.qll rename to python/ql/src/semmle/python/security/performance/ExponentialBackTracking.qll diff --git a/python/ql/src/semmle/python/regex/ReDoSUtil.qll b/python/ql/src/semmle/python/security/performance/ReDoSUtil.qll similarity index 100% rename from python/ql/src/semmle/python/regex/ReDoSUtil.qll rename to python/ql/src/semmle/python/security/performance/ReDoSUtil.qll diff --git a/python/ql/src/semmle/python/regex/RegExpTreeView.qll b/python/ql/src/semmle/python/security/performance/RegExpTreeView.qll similarity index 100% rename from python/ql/src/semmle/python/regex/RegExpTreeView.qll rename to python/ql/src/semmle/python/security/performance/RegExpTreeView.qll diff --git a/python/ql/src/semmle/python/regex/SuperlinearBackTracking.qll b/python/ql/src/semmle/python/security/performance/SuperlinearBackTracking.qll similarity index 100% rename from python/ql/src/semmle/python/regex/SuperlinearBackTracking.qll rename to python/ql/src/semmle/python/security/performance/SuperlinearBackTracking.qll From 45e30b0c060006b60366879224c27d12a401d19f Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Wed, 30 Jun 2021 15:04:37 +0200 Subject: [PATCH 21/22] Python: comment out temporarily unused predicate --- python/ql/src/semmle/python/RegexTreeView.qll | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/python/ql/src/semmle/python/RegexTreeView.qll b/python/ql/src/semmle/python/RegexTreeView.qll index cb819e7f9c19..2e8871dbe1af 100644 --- a/python/ql/src/semmle/python/RegexTreeView.qll +++ b/python/ql/src/semmle/python/RegexTreeView.qll @@ -479,18 +479,18 @@ class RegExpEscape extends RegExpNormalChar { // ) none() } - - /** - * Gets int value for the `index`th char in the hex number of the unicode escape. - * E.g. for `\u0061` and `index = 2` this returns 96 (the number `6` interpreted as hex). - */ - private int getHexValueFromUnicode(int index) { - isUnicode() and - exists(string hex, string char | hex = getText().suffix(2) | - char = hex.charAt(index) and - result = 16.pow(hex.length() - index - 1) * toHex(char) - ) - } + // TODO: Enable this once a supporting CLI is released. + // /** + // * Gets int value for the `index`th char in the hex number of the unicode escape. + // * E.g. for `\u0061` and `index = 2` this returns 96 (the number `6` interpreted as hex). 
+ // */ + // private int getHexValueFromUnicode(int index) { + // isUnicode() and + // exists(string hex, string char | hex = getText().suffix(2) | + // char = hex.charAt(index) and + // result = 16.pow(hex.length() - index - 1) * toHex(char) + // ) + // } } /** From a176e6ac309c847e566662e8962a041b147bc5dc Mon Sep 17 00:00:00 2001 From: Rasmus Lerchedahl Petersen Date: Wed, 30 Jun 2021 15:28:31 +0200 Subject: [PATCH 22/22] Python: comment out temporarily unused predicate --- python/ql/src/semmle/python/RegexTreeView.qll | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/python/ql/src/semmle/python/RegexTreeView.qll b/python/ql/src/semmle/python/RegexTreeView.qll index 2e8871dbe1af..5f273c115480 100644 --- a/python/ql/src/semmle/python/RegexTreeView.qll +++ b/python/ql/src/semmle/python/RegexTreeView.qll @@ -493,26 +493,26 @@ class RegExpEscape extends RegExpNormalChar { // } } -/** - * Gets the hex number for the `hex` char. - */ -private int toHex(string hex) { - hex = [0 .. 9].toString() and - result = hex.toInt() - or - result = 10 and hex = ["a", "A"] - or - result = 11 and hex = ["b", "B"] - or - result = 12 and hex = ["c", "C"] - or - result = 13 and hex = ["d", "D"] - or - result = 14 and hex = ["e", "E"] - or - result = 15 and hex = ["f", "F"] -} - +// TODO: Enable this once a supporting CLI is released. +// /** +// * Gets the hex number for the `hex` char. +// */ +// private int toHex(string hex) { +// hex = [0 .. 9].toString() and +// result = hex.toInt() +// or +// result = 10 and hex = ["a", "A"] +// or +// result = 11 and hex = ["b", "B"] +// or +// result = 12 and hex = ["c", "C"] +// or +// result = 13 and hex = ["d", "D"] +// or +// result = 14 and hex = ["e", "E"] +// or +// result = 15 and hex = ["f", "F"] +// } /** * A character class escape in a regular expression. * That is, an escaped charachter that denotes multiple characters.