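// xml-spec.js: specs for the Atom language-xml grammar (scope "text.xml").
// The grammar is reloaded before each test by activating the language-xml package.
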
describe("XML grammar", function() {
  let grammar = null;

  beforeEach(function() {
    waitsForPromise(() => atom.packages.activatePackage("language-xml"));

    runs(() => grammar = atom.grammars.grammarForScopeName("text.xml"));
  });

  it("parses the grammar", function() {
    expect(grammar).toBeTruthy();
    expect(grammar.scopeName).toBe("text.xml");
  });
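
  // Each token produced by tokenizeLine/tokenizeLines is a {value, scopes} pair;
  // the scopes array is ordered from the outermost scope ("text.xml") inward.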
  11. it("tokenizes comments in internal subsets correctly", function() {
  12. const lines = grammar.tokenizeLines(`\
  13. <!DOCTYPE root [
  14. <a> <!-- [] -->
  15. <b> <!-- [] -->
  16. <c> <!-- [] -->
  17. ]>\
  18. `
  19. );
  20. expect(lines[1][1]).toEqual({value: '<!--', scopes: ['text.xml', 'meta.tag.sgml.doctype.xml', 'meta.internalsubset.xml', 'comment.block.xml', 'punctuation.definition.comment.xml']});
  21. expect(lines[2][1]).toEqual({value: '<!--', scopes: ['text.xml', 'meta.tag.sgml.doctype.xml', 'meta.internalsubset.xml', 'comment.block.xml', 'punctuation.definition.comment.xml']});
  22. expect(lines[3][1]).toEqual({value: '<!--', scopes: ['text.xml', 'meta.tag.sgml.doctype.xml', 'meta.internalsubset.xml', 'comment.block.xml', 'punctuation.definition.comment.xml']});
  23. });
  24. it('tokenizes comment endings with more than two dashes as invalid', function() {
  25. const {tokens} = grammar.tokenizeLine('<!-- invalid comment --->');
  26. expect(tokens[0]).toEqual({value: '<!--', scopes: ['text.xml', 'comment.block.xml', 'punctuation.definition.comment.xml']});
  27. expect(tokens[1]).toEqual({value: ' invalid comment ', scopes: ['text.xml', 'comment.block.xml']});
  28. expect(tokens[2]).toEqual({value: '--', scopes: ['text.xml', 'comment.block.xml', 'invalid.illegal.bad-comments-or-CDATA.xml']});
  29. });
  30. it('tokenizes comments with two dashes not followed by ">" as invalid', function() {
  31. const {tokens} = grammar.tokenizeLine('<!-- invalid -- comment -->');
  32. expect(tokens[0]).toEqual({value: '<!--', scopes: ['text.xml', 'comment.block.xml', 'punctuation.definition.comment.xml']});
  33. expect(tokens[1]).toEqual({value: ' invalid ', scopes: ['text.xml', 'comment.block.xml']});
  34. expect(tokens[2]).toEqual({value: '--', scopes: ['text.xml', 'comment.block.xml', 'invalid.illegal.bad-comments-or-CDATA.xml']});
  35. expect(tokens[3]).toEqual({value: ' comment -->', scopes: ['text.xml', 'comment.block.xml']});
  36. });
  37. it("tokenizes empty element meta.tag.no-content.xml", function() {
  38. const {tokens} = grammar.tokenizeLine('<n></n>');
  39. expect(tokens[0]).toEqual({value: '<', scopes: ['text.xml', 'meta.tag.no-content.xml', 'punctuation.definition.tag.xml']});
  40. expect(tokens[1]).toEqual({value: 'n', scopes: ['text.xml', 'meta.tag.no-content.xml', 'entity.name.tag.xml', 'entity.name.tag.localname.xml']});
  41. expect(tokens[2]).toEqual({value: '>', scopes: ['text.xml', 'meta.tag.no-content.xml', 'punctuation.definition.tag.xml']});
  42. expect(tokens[3]).toEqual({value: '</', scopes: ['text.xml', 'meta.tag.no-content.xml', 'punctuation.definition.tag.xml']});
  43. expect(tokens[4]).toEqual({value: 'n', scopes: ['text.xml', 'meta.tag.no-content.xml', 'entity.name.tag.xml', 'entity.name.tag.localname.xml']});
  44. expect(tokens[5]).toEqual({value: '>', scopes: ['text.xml', 'meta.tag.no-content.xml', 'punctuation.definition.tag.xml']});
  45. });
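
  // The two samples below differ only in whether the attribute line is indented:
  // leading whitespace becomes its own token, so the attribute name sits at
  // index 1 in the indented case and at index 0 in the unindented one.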
  46. it("tokenizes attribute-name of multi-line tag", function() {
  47. const linesWithIndent = grammar.tokenizeLines(`\
  48. <el
  49. attrName="attrValue">
  50. </el>\
  51. `
  52. );
  53. expect(linesWithIndent[1][1]).toEqual({value: 'attrName', scopes: ['text.xml', 'meta.tag.xml', 'entity.other.attribute-name.localname.xml']});
  54. const linesWithoutIndent = grammar.tokenizeLines(`\
  55. <el
  56. attrName="attrValue">
  57. </el>\
  58. `
  59. );
  60. expect(linesWithoutIndent[1][0]).toEqual({value: 'attrName', scopes: ['text.xml', 'meta.tag.xml', 'entity.other.attribute-name.localname.xml']});
  61. });
  62. it("tokenizes attribute-name.namespace contains period", function() {
  63. const lines = grammar.tokenizeLines(`\
  64. <el name.space:attrName="attrValue">
  65. </el>\
  66. `
  67. );
  68. expect(lines[0][3]).toEqual({value: 'name.space', scopes: ['text.xml', 'meta.tag.xml', 'entity.other.attribute-name.namespace.xml']});
  69. });
  70. it("tokenizes attribute-name.namespace contains East-Asian Kanji", function() {
  71. const lines = grammar.tokenizeLines(`\
  72. <el 名前空間名:attrName="attrValue">
  73. </el>\
  74. `
  75. );
  76. expect(lines[0][3]).toEqual({value: '名前空間名', scopes: ['text.xml', 'meta.tag.xml', 'entity.other.attribute-name.namespace.xml']});
  77. });
  78. it("tokenizes attribute-name.localname contains period", function() {
  79. const lines = grammar.tokenizeLines(`\
  80. <el attr.name="attrValue">
  81. </el>\
  82. `
  83. );
  84. expect(lines[0][3]).toEqual({value: 'attr.name', scopes: ['text.xml', 'meta.tag.xml', 'entity.other.attribute-name.localname.xml']});
  85. });
  86. it("tokenizes attribute-name.localname contains colon", function() {
  87. const lines = grammar.tokenizeLines(`\
  88. <el namespace:attr:name="attrValue">
  89. </el>\
  90. `
  91. );
  92. expect(lines[0][5]).toEqual({value: 'attr:name', scopes: ['text.xml', 'meta.tag.xml', 'entity.other.attribute-name.localname.xml']});
  93. });
  94. it("tokenizes attribute-name.localname contains East-Asian Kanji", function() {
  95. const lines = grammar.tokenizeLines(`\
  96. <el 属性名="attrValue">
  97. </el>\
  98. `
  99. );
  100. expect(lines[0][3]).toEqual({value: '属性名', scopes: ['text.xml', 'meta.tag.xml', 'entity.other.attribute-name.localname.xml']});
  101. });
  102. it("tokenizes attribute-name.localname when followed by spaces", function() {
  103. const lines = grammar.tokenizeLines(`\
  104. <el attrName ="attrValue">
  105. </el>\
  106. `
  107. );
  108. expect(lines[0][3]).toEqual({value: 'attrName', scopes: ['text.xml', 'meta.tag.xml', 'entity.other.attribute-name.localname.xml']});
  109. });
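
  // firstLineRegex is compiled from the grammar's firstLineMatch pattern, which
  // Atom uses to detect XML from a file's first line (Emacs/Vim modelines or an
  // XML declaration).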
  110. describe("firstLineMatch", function() {
  111. it("recognises Emacs modelines", function() {
  112. let line;
  113. const valid = `\
  114. #-*-xml-*-
  115. #-*-mode:xml-*-
  116. /* -*-xml-*- */
  117. // -*- XML -*-
  118. /* -*- mode:xml -*- */
  119. // -*- font:bar;mode:XML -*-
  120. // -*- font:bar;mode:XMl;foo:bar; -*-
  121. // -*-font:mode;mode:XML-*-
  122. // -*- foo:bar mode: xml bar:baz -*-
  123. " -*-foo:bar;mode:xML;bar:foo-*- ";
  124. " -*-font-mode:foo;mode:XML;foo-bar:quux-*-"
  125. "-*-font:x;foo:bar; mode : xml;bar:foo;foooooo:baaaaar;fo:ba;-*-";
  126. "-*- font:x;foo : bar ; mode : xMl ; bar : foo ; foooooo:baaaaar;fo:ba-*-";\
  127. `;
  128. for (line of valid.split(/\n/)) {
  129. expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull();
  130. }
  131. const invalid = `\
  132. /* --*XML-*- */
  133. /* -*-- XML -*-
  134. /* -*- -- XML -*-
  135. /* -*- HXML -;- -*-
  136. // -*- iXML -*-
  137. // -*- XML; -*-
  138. // -*- xml-stuff -*-
  139. /* -*- model:xml -*-
  140. /* -*- indent-mode:xml -*-
  141. // -*- font:mode;xml -*-
  142. // -*- mode: -*- XML
  143. // -*- mode: grok-with-xml -*-
  144. // -*-font:mode;mode:xml--*-\
  145. `;
  146. return (() => {
  147. const result = [];
  148. for (line of invalid.split(/\n/)) {
  149. result.push(expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull());
  150. }
  151. return result;
  152. })();
  153. });
  154. it("recognises Vim modelines", function() {
  155. let line;
  156. const valid = `\
  157. vim: se filetype=xml:
  158. # vim: se ft=xml:
  159. # vim: set ft=xml:
  160. # vim: set filetype=XML:
  161. # vim: ft=xml
  162. # vim: syntax=xML
  163. # vim: se syntax=XML:
  164. # ex: syntax=xml
  165. # vim:ft=xml
  166. # vim600: ft=xml
  167. # vim>600: set ft=xml:
  168. # vi:noai:sw=3 ts=6 ft=xml
  169. # vi::::::::::noai:::::::::::: ft=xml
  170. # vim:ts=4:sts=4:sw=4:noexpandtab:ft=xml
  171. # vi:: noai : : : : sw =3 ts =6 ft =xml
  172. # vim: ts=4: pi sts=4: ft=xml: noexpandtab: sw=4:
  173. # vim: ts=4 sts=4: ft=xml noexpandtab:
  174. # vim:noexpandtab sts=4 ft=xml ts=4
  175. # vim:noexpandtab:ft=xml
  176. # vim:ts=4:sts=4 ft=xml:noexpandtab:\x20
  177. # vim:noexpandtab titlestring=hi\|there\\\\ ft=xml ts=4\
  178. `;
  179. for (line of valid.split(/\n/)) {
  180. expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull();
  181. }
  182. const invalid = `\
  183. ex: se filetype=xml:
  184. _vi: se filetype=xml:
  185. vi: se filetype=xml
  186. # vim set ft=xmlz
  187. # vim: soft=xml
  188. # vim: hairy-syntax=xml:
  189. # vim set ft=xml:
  190. # vim: setft=xml:
  191. # vim: se ft=xml backupdir=tmp
  192. # vim: set ft=xml set cmdheight=1
  193. # vim:noexpandtab sts:4 ft:xml ts:4
  194. # vim:noexpandtab titlestring=hi\\|there\\ ft=xml ts=4
  195. # vim:noexpandtab titlestring=hi\\|there\\\\\\ ft=xml ts=4\
  196. `;
  197. return (() => {
  198. const result = [];
  199. for (line of invalid.split(/\n/)) {
  200. result.push(expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull());
  201. }
  202. return result;
  203. })();
  204. });
  205. it("recognises a valid XML declaration", function() {
  206. let line;
  207. const valid = `\
  208. <?xml version="1.0"?>
  209. <?xml version="1.0" encoding="UTF-8"?>
  210. <?xml version="1.1" standalone="yes" ?>
  211. <?xml version = '1.0' ?>
  212. <?xml version="1.0" encoding='UTF-8' standalone='no' ?>\
  213. `;
  214. for (line of valid.split(/\n/)) {
  215. expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull();
  216. }
  217. const invalid = `\
  218. <?XML version="1.0"?>
  219. <?xml version="1.0'?>
  220. <?xml version='1.0"?>
  221. <?xml version="2.0"?>
  222. <?xml encoding="UTF-8" version="1.0" ?>
  223. <?xml version="1.0" standalone="nah" ?>
  224. <?xml version=1.0 ?>
  225. <?xml version="1.0">\
  226. `;
  227. return (() => {
  228. const result = [];
  229. for (line of invalid.split(/\n/)) {
  230. result.push(expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull());
  231. }
  232. return result;
  233. })();
  234. });
  235. });
  236. });