/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */

"use strict";

const {
  InspectorCSSParserWrapper,
} = require("resource://devtools/shared/css/lexer.js");

add_task(function test_lexer() {
  // Each entry pairs an input string with the complete token stream the
  // lexer is expected to emit for it. Fields omitted from an expected
  // token (value/number/unit) are treated as null.
  const LEX_TESTS = [
    ["simple", [{ tokenType: "Ident", text: "simple", value: "simple" }]],
    [
      "simple: { hi; }",
      [
        { tokenType: "Ident", text: "simple", value: "simple" },
        { tokenType: "Colon", text: ":" },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "CurlyBracketBlock", text: "{" },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "Ident", text: "hi", value: "hi" },
        { tokenType: "Semicolon", text: ";" },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "CloseCurlyBracket", text: "}" },
      ],
    ],
    [
      "/* whatever */",
      [{ tokenType: "Comment", text: "/* whatever */", value: " whatever " }],
    ],
    [
      "'string'",
      [{ tokenType: "QuotedString", text: "'string'", value: "string" }],
    ],
    [
      '"string"',
      [{ tokenType: "QuotedString", text: `"string"`, value: "string" }],
    ],
    [
      "rgb(1,2,3)",
      [
        { tokenType: "Function", text: "rgb(", value: "rgb" },
        { tokenType: "Number", text: "1", number: 1 },
        { tokenType: "Comma", text: "," },
        { tokenType: "Number", text: "2", number: 2 },
        { tokenType: "Comma", text: "," },
        { tokenType: "Number", text: "3", number: 3 },
        { tokenType: "CloseParenthesis", text: ")" },
      ],
    ],
    ["@media", [{ tokenType: "AtKeyword", text: "@media", value: "media" }]],
    ["#hibob", [{ tokenType: "IDHash", text: "#hibob", value: "hibob" }]],
    ["#123", [{ tokenType: "Hash", text: "#123", value: "123" }]],
    [
      "23px",
      [{ tokenType: "Dimension", text: "23px", number: 23, unit: "px" }],
    ],
    ["23%", [{ tokenType: "Percentage", text: "23%", number: 0.23 }]],
    [
      "url(http://example.com)",
      [
        {
          tokenType: "UnquotedUrl",
          text: "url(http://example.com)",
          value: "http://example.com",
        },
      ],
    ],
    [
      "url('http://example.com')",
      [
        { tokenType: "Function", text: "url(", value: "url" },
        {
          tokenType: "QuotedString",
          text: "'http://example.com'",
          value: "http://example.com",
        },
        { tokenType: "CloseParenthesis", text: ")" },
      ],
    ],
    [
      "url( 'http://example.com' )",
      [
        { tokenType: "Function", text: "url(", value: "url" },
        { tokenType: "WhiteSpace", text: " " },
        {
          tokenType: "QuotedString",
          text: "'http://example.com'",
          value: "http://example.com",
        },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "CloseParenthesis", text: ")" },
      ],
    ],
    // In CSS Level 3, this is an ordinary URL, not a BAD_URL.
    [
      "url(http://example.com",
      [
        {
          tokenType: "UnquotedUrl",
          text: "url(http://example.com",
          value: "http://example.com",
        },
      ],
    ],
    [
      "url(http://example.com @",
      [
        {
          tokenType: "BadUrl",
          text: "url(http://example.com @",
          value: "http://example.com @",
        },
      ],
    ],
    [
      "quo\\ting",
      [{ tokenType: "Ident", text: "quo\\ting", value: "quoting" }],
    ],
    [
      "'bad string\n",
      [
        { tokenType: "BadString", text: "'bad string", value: "bad string" },
        { tokenType: "WhiteSpace", text: "\n" },
      ],
    ],
    ["~=", [{ tokenType: "IncludeMatch", text: "~=" }]],
    ["|=", [{ tokenType: "DashMatch", text: "|=" }]],
    ["^=", [{ tokenType: "PrefixMatch", text: "^=" }]],
    ["$=", [{ tokenType: "SuffixMatch", text: "$=" }]],
    ["*=", [{ tokenType: "SubstringMatch", text: "*=" }]],

    [
      "<!-- html comment -->",
      [
        { tokenType: "CDO", text: "<!--" },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "Ident", text: "html", value: "html" },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "Ident", text: "comment", value: "comment" },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "CDC", text: "-->" },
      ],
    ],

    // earlier versions of CSS had "bad comment" tokens, but in level 3,
    // unterminated comments are just comments.
    [
      "/* bad comment",
      [{ tokenType: "Comment", text: "/* bad comment", value: " bad comment" }],
    ],
  ];

  // Lex cssText and compare every produced token against the expected
  // stream, also checking that token offsets are non-empty, contiguous,
  // and collectively cover the whole input.
  const checkTokenStream = (cssText, expectedTokens) => {
    const lexer = new InspectorCSSParserWrapper(cssText);
    let rebuilt = "";
    let previousEnd = 0;
    let index = 0;
    for (let token = lexer.nextToken(); token; token = lexer.nextToken()) {
      const expected = expectedTokens[index];
      const actual = {
        tokenType: token.tokenType,
        text: token.text,
        value: token.value,
        number: token.number,
        unit: token.unit,
      };
      Assert.deepEqual(
        actual,
        {
          tokenType: expected.tokenType,
          text: expected.text,
          value: expected.value ?? null,
          number: expected.number ?? null,
          unit: expected.unit ?? null,
        },
        `Got expected token #${index} for "${cssText}"`
      );

      // Tokens must be non-empty and start exactly where the previous
      // one ended.
      Assert.greater(token.endOffset, token.startOffset);
      equal(token.startOffset, previousEnd);
      previousEnd = token.endOffset;
      rebuilt += cssText.substring(token.startOffset, token.endOffset);
      ++index;
    }
    // Ensure that we saw the correct number of tokens.
    equal(index, expectedTokens.length);
    // Ensure that the reported offsets cover all the text.
    equal(rebuilt, cssText);
  };

  for (const [cssText, rustTokenTypes] of LEX_TESTS) {
    info(`Test "${cssText}"`);
    checkTokenStream(cssText, rustTokenTypes);
  }
});

add_task(function test_lexer_linecol() {
  // Expected "<tokenType>:<line>:<column>" entries per input; the final
  // entry has an empty token type and records the EOF position.
  const LINECOL_TESTS = [
    ["simple", ["Ident:0:0", ":0:6"]],
    ["\n stuff", ["WhiteSpace:0:0", "Ident:1:4", ":1:9"]],
    [
      '"string with \\\nnewline" \r\n',
      ["QuotedString:0:0", "WhiteSpace:1:8", ":2:0"],
    ],
  ];

  const checkLocations = (cssText, locations) => {
    const lexer = new InspectorCSSParserWrapper(cssText);
    let index = 0;
    let token;
    const assertCurrentLocation = () => {
      const startLine = lexer.parser.lineNumber;
      const startColumn = lexer.parser.columnNumber;

      // Build the combined string in this slightly roundabout way so the
      // same helper can also check the location of the EOF, where no
      // token is available.
      let combined = `:${startLine}:${startColumn}`;
      if (token) {
        combined = token.tokenType + combined;
      }

      equal(combined, locations[index]);
      ++index;
    };
    while ((token = lexer.nextToken())) {
      assertCurrentLocation();
    }
    // Collect location after we consumed all the tokens
    assertCurrentLocation();
    // Ensure that we saw the correct number of tokens.
    equal(index, locations.length);
  };

  for (const [cssText, rustLocations] of LINECOL_TESTS) {
    info(`Test "${cssText}"`);
    checkLocations(cssText, rustLocations);
  }
});

add_task(function test_lexer_eofchar() {
  // [input, characters performEOFFixup should append, optional alternate
  // string to hand to performEOFFixup instead of the input itself].
  const EOFCHAR_TESTS = [
    ["hello", "hello"],
    ["hello \\", "hello \\\\"],
    ["'hello", "'hello'"],
    ['"hello', '"hello"'],
    ["'hello\\", "'hello\\\\'"],
    ['"hello\\', '"hello\\\\"'],
    ["/*hello", "/*hello*/"],
    ["/*hello*", "/*hello*/"],
    ["/*hello\\", "/*hello\\*/"],
    ["url(hello", "url(hello)"],
    ["url('hello", "url('hello')"],
    ['url("hello', 'url("hello")'],
    ["url(hello\\", "url(hello\\\\)"],
    ["url('hello\\", "url('hello\\\\')"],
    ['url("hello\\', 'url("hello\\\\")'],
    // Ensure that passing a different inputString to performEOFFixup
    // doesn't cause an assertion trying to strip a backslash from the
    // end of an empty string.
    ["'\\", "\\'", ""],
    // Check single-char quotes
    [`"`, `""`],
    [`'`, `''`],
  ];

  const checkEofFixup = (cssText, expectedAppend, argText) => {
    const lexer = new InspectorCSSParserWrapper(cssText, {
      trackEOFChars: true,
    });
    // Drain the token stream; the tokens themselves are irrelevant here,
    // but performEOFFixup may only be called once the input is consumed.
    while (lexer.nextToken()) {
      // Nothing to do with the tokens.
    }

    info(`EOF char test, input = ${cssText}`);
    equal(lexer.performEOFFixup(argText), expectedAppend);
  };

  for (const [cssText, expectedAppend, argText = cssText] of EOFCHAR_TESTS) {
    info(`Test "${cssText}"`);
    checkEofFixup(cssText, expectedAppend, argText);
  }
});