// NOTE(review): removed line-number gutter residue introduced by text extraction.
- const NullGrammar = require('../src/null-grammar');
- const TextMateLanguageMode = require('../src/text-mate-language-mode');
- const TextBuffer = require('text-buffer');
- const { Point } = TextBuffer;
- const _ = require('underscore-plus');
- const dedent = require('dedent');
- describe('TextMateLanguageMode', () => {
- let languageMode, buffer, config;
- beforeEach(async () => {
- config = atom.config;
- config.set('core.useTreeSitterParsers', false);
- // enable async tokenization
- TextMateLanguageMode.prototype.chunkSize = 5;
- jasmine.unspy(TextMateLanguageMode.prototype, 'tokenizeInBackground');
- await atom.packages.activatePackage('language-javascript');
- });
- afterEach(() => {
- buffer && buffer.destroy();
- languageMode && languageMode.destroy();
- config.unset('core.useTreeSitterParsers');
- });
- describe('when the editor is constructed with the largeFileMode option set to true', () => {
- it("loads the editor but doesn't tokenize", async () => {
- const line = 'a b c d\n';
- buffer = new TextBuffer(line.repeat(256 * 1024));
- expect(buffer.getText().length).toBe(2 * 1024 * 1024);
- languageMode = new TextMateLanguageMode({
- buffer,
- grammar: atom.grammars.grammarForScopeName('source.js'),
- tabLength: 2
- });
- buffer.setLanguageMode(languageMode);
- expect(languageMode.isRowCommented(0)).toBeFalsy();
- // It treats the entire line as one big token
- let iterator = languageMode.buildHighlightIterator();
- iterator.seek({ row: 0, column: 0 });
- iterator.moveToSuccessor();
- expect(iterator.getPosition()).toEqual({ row: 0, column: 7 });
- buffer.insert([0, 0], 'hey"');
- iterator = languageMode.buildHighlightIterator();
- iterator.seek({ row: 0, column: 0 });
- iterator.moveToSuccessor();
- expect(iterator.getPosition()).toEqual({ row: 0, column: 11 });
- });
- });
- describe('tokenizing', () => {
- describe('when the buffer is destroyed', () => {
- beforeEach(() => {
- buffer = atom.project.bufferForPathSync('sample.js');
- languageMode = new TextMateLanguageMode({
- buffer,
- config,
- grammar: atom.grammars.grammarForScopeName('source.js')
- });
- languageMode.startTokenizing();
- });
- it('stops tokenization', () => {
- languageMode.destroy();
- spyOn(languageMode, 'tokenizeNextChunk');
- advanceClock();
- expect(languageMode.tokenizeNextChunk).not.toHaveBeenCalled();
- });
- });
- describe('when the buffer contains soft-tabs', () => {
- beforeEach(() => {
- buffer = atom.project.bufferForPathSync('sample.js');
- languageMode = new TextMateLanguageMode({
- buffer,
- config,
- grammar: atom.grammars.grammarForScopeName('source.js')
- });
- buffer.setLanguageMode(languageMode);
- languageMode.startTokenizing();
- });
- afterEach(() => {
- languageMode.destroy();
- buffer.release();
- });
- describe('on construction', () =>
- it('tokenizes lines chunk at a time in the background', () => {
- const line0 = languageMode.tokenizedLines[0];
- expect(line0).toBeUndefined();
- const line11 = languageMode.tokenizedLines[11];
- expect(line11).toBeUndefined();
- // tokenize chunk 1
- advanceClock();
- expect(languageMode.tokenizedLines[0].ruleStack != null).toBeTruthy();
- expect(languageMode.tokenizedLines[4].ruleStack != null).toBeTruthy();
- expect(languageMode.tokenizedLines[5]).toBeUndefined();
- // tokenize chunk 2
- advanceClock();
- expect(languageMode.tokenizedLines[5].ruleStack != null).toBeTruthy();
- expect(languageMode.tokenizedLines[9].ruleStack != null).toBeTruthy();
- expect(languageMode.tokenizedLines[10]).toBeUndefined();
- // tokenize last chunk
- advanceClock();
- expect(
- languageMode.tokenizedLines[10].ruleStack != null
- ).toBeTruthy();
- expect(
- languageMode.tokenizedLines[12].ruleStack != null
- ).toBeTruthy();
- }));
- describe('when the buffer is partially tokenized', () => {
- beforeEach(() => {
- // tokenize chunk 1 only
- advanceClock();
- });
- describe('when there is a buffer change inside the tokenized region', () => {
- describe('when lines are added', () => {
- it('pushes the invalid rows down', () => {
- expect(languageMode.firstInvalidRow()).toBe(5);
- buffer.insert([1, 0], '\n\n');
- expect(languageMode.firstInvalidRow()).toBe(7);
- });
- });
- describe('when lines are removed', () => {
- it('pulls the invalid rows up', () => {
- expect(languageMode.firstInvalidRow()).toBe(5);
- buffer.delete([[1, 0], [3, 0]]);
- expect(languageMode.firstInvalidRow()).toBe(2);
- });
- });
- describe('when the change invalidates all the lines before the current invalid region', () => {
- it('retokenizes the invalidated lines and continues into the valid region', () => {
- expect(languageMode.firstInvalidRow()).toBe(5);
- buffer.insert([2, 0], '/*');
- expect(languageMode.firstInvalidRow()).toBe(3);
- advanceClock();
- expect(languageMode.firstInvalidRow()).toBe(8);
- });
- });
- });
- describe('when there is a buffer change surrounding an invalid row', () => {
- it('pushes the invalid row to the end of the change', () => {
- buffer.setTextInRange([[4, 0], [6, 0]], '\n\n\n');
- expect(languageMode.firstInvalidRow()).toBe(8);
- });
- });
- describe('when there is a buffer change inside an invalid region', () => {
- it('does not attempt to tokenize the lines in the change, and preserves the existing invalid row', () => {
- expect(languageMode.firstInvalidRow()).toBe(5);
- buffer.setTextInRange([[6, 0], [7, 0]], '\n\n\n');
- expect(languageMode.tokenizedLines[6]).toBeUndefined();
- expect(languageMode.tokenizedLines[7]).toBeUndefined();
- expect(languageMode.firstInvalidRow()).toBe(5);
- });
- });
- });
- describe('when the buffer is fully tokenized', () => {
- beforeEach(() => fullyTokenize(languageMode));
- describe('when there is a buffer change that is smaller than the chunk size', () => {
- describe('when lines are updated, but none are added or removed', () => {
- it('updates tokens to reflect the change', () => {
- buffer.setTextInRange([[0, 0], [2, 0]], 'foo()\n7\n');
- expect(languageMode.tokenizedLines[0].tokens[1]).toEqual({
- value: '(',
- scopes: [
- 'source.js',
- 'meta.function-call.js',
- 'meta.arguments.js',
- 'punctuation.definition.arguments.begin.bracket.round.js'
- ]
- });
- expect(languageMode.tokenizedLines[1].tokens[0]).toEqual({
- value: '7',
- scopes: ['source.js', 'constant.numeric.decimal.js']
- });
- // line 2 is unchanged
- expect(languageMode.tokenizedLines[2].tokens[1]).toEqual({
- value: 'if',
- scopes: ['source.js', 'keyword.control.js']
- });
- });
- describe('when the change invalidates the tokenization of subsequent lines', () => {
- it('schedules the invalidated lines to be tokenized in the background', () => {
- buffer.insert([5, 30], '/* */');
- buffer.insert([2, 0], '/*');
- expect(languageMode.tokenizedLines[3].tokens[0].scopes).toEqual(
- ['source.js']
- );
- advanceClock();
- expect(languageMode.tokenizedLines[3].tokens[0].scopes).toEqual(
- ['source.js', 'comment.block.js']
- );
- expect(languageMode.tokenizedLines[4].tokens[0].scopes).toEqual(
- ['source.js', 'comment.block.js']
- );
- expect(languageMode.tokenizedLines[5].tokens[0].scopes).toEqual(
- ['source.js', 'comment.block.js']
- );
- });
- });
- it('resumes highlighting with the state of the previous line', () => {
- buffer.insert([0, 0], '/*');
- buffer.insert([5, 0], '*/');
- buffer.insert([1, 0], 'var ');
- expect(languageMode.tokenizedLines[1].tokens[0].scopes).toEqual([
- 'source.js',
- 'comment.block.js'
- ]);
- });
- });
- describe('when lines are both updated and removed', () => {
- it('updates tokens to reflect the change', () => {
- buffer.setTextInRange([[1, 0], [3, 0]], 'foo()');
- // previous line 0 remains
- expect(languageMode.tokenizedLines[0].tokens[0]).toEqual({
- value: 'var',
- scopes: ['source.js', 'storage.type.var.js']
- });
- // previous line 3 should be combined with input to form line 1
- expect(languageMode.tokenizedLines[1].tokens[0]).toEqual({
- value: 'foo',
- scopes: [
- 'source.js',
- 'meta.function-call.js',
- 'entity.name.function.js'
- ]
- });
- expect(languageMode.tokenizedLines[1].tokens[6]).toEqual({
- value: '=',
- scopes: ['source.js', 'keyword.operator.assignment.js']
- });
- // lines below deleted regions should be shifted upward
- expect(languageMode.tokenizedLines[2].tokens[1]).toEqual({
- value: 'while',
- scopes: ['source.js', 'keyword.control.js']
- });
- expect(languageMode.tokenizedLines[3].tokens[1]).toEqual({
- value: '=',
- scopes: ['source.js', 'keyword.operator.assignment.js']
- });
- expect(languageMode.tokenizedLines[4].tokens[1]).toEqual({
- value: '<',
- scopes: ['source.js', 'keyword.operator.comparison.js']
- });
- });
- });
- describe('when the change invalidates the tokenization of subsequent lines', () => {
- it('schedules the invalidated lines to be tokenized in the background', () => {
- buffer.insert([5, 30], '/* */');
- buffer.setTextInRange([[2, 0], [3, 0]], '/*');
- expect(languageMode.tokenizedLines[2].tokens[0].scopes).toEqual([
- 'source.js',
- 'comment.block.js',
- 'punctuation.definition.comment.begin.js'
- ]);
- expect(languageMode.tokenizedLines[3].tokens[0].scopes).toEqual([
- 'source.js'
- ]);
- advanceClock();
- expect(languageMode.tokenizedLines[3].tokens[0].scopes).toEqual([
- 'source.js',
- 'comment.block.js'
- ]);
- expect(languageMode.tokenizedLines[4].tokens[0].scopes).toEqual([
- 'source.js',
- 'comment.block.js'
- ]);
- });
- });
- describe('when lines are both updated and inserted', () => {
- it('updates tokens to reflect the change', () => {
- buffer.setTextInRange(
- [[1, 0], [2, 0]],
- 'foo()\nbar()\nbaz()\nquux()'
- );
- // previous line 0 remains
- expect(languageMode.tokenizedLines[0].tokens[0]).toEqual({
- value: 'var',
- scopes: ['source.js', 'storage.type.var.js']
- });
- // 3 new lines inserted
- expect(languageMode.tokenizedLines[1].tokens[0]).toEqual({
- value: 'foo',
- scopes: [
- 'source.js',
- 'meta.function-call.js',
- 'entity.name.function.js'
- ]
- });
- expect(languageMode.tokenizedLines[2].tokens[0]).toEqual({
- value: 'bar',
- scopes: [
- 'source.js',
- 'meta.function-call.js',
- 'entity.name.function.js'
- ]
- });
- expect(languageMode.tokenizedLines[3].tokens[0]).toEqual({
- value: 'baz',
- scopes: [
- 'source.js',
- 'meta.function-call.js',
- 'entity.name.function.js'
- ]
- });
- // previous line 2 is joined with quux() on line 4
- expect(languageMode.tokenizedLines[4].tokens[0]).toEqual({
- value: 'quux',
- scopes: [
- 'source.js',
- 'meta.function-call.js',
- 'entity.name.function.js'
- ]
- });
- expect(languageMode.tokenizedLines[4].tokens[4]).toEqual({
- value: 'if',
- scopes: ['source.js', 'keyword.control.js']
- });
- // previous line 3 is pushed down to become line 5
- expect(languageMode.tokenizedLines[5].tokens[3]).toEqual({
- value: '=',
- scopes: ['source.js', 'keyword.operator.assignment.js']
- });
- });
- });
- describe('when the change invalidates the tokenization of subsequent lines', () => {
- it('schedules the invalidated lines to be tokenized in the background', () => {
- buffer.insert([5, 30], '/* */');
- buffer.insert([2, 0], '/*\nabcde\nabcder');
- expect(languageMode.tokenizedLines[2].tokens[0].scopes).toEqual([
- 'source.js',
- 'comment.block.js',
- 'punctuation.definition.comment.begin.js'
- ]);
- expect(languageMode.tokenizedLines[3].tokens[0].scopes).toEqual([
- 'source.js',
- 'comment.block.js'
- ]);
- expect(languageMode.tokenizedLines[4].tokens[0].scopes).toEqual([
- 'source.js',
- 'comment.block.js'
- ]);
- expect(languageMode.tokenizedLines[5].tokens[0].scopes).toEqual([
- 'source.js'
- ]);
- advanceClock(); // tokenize invalidated lines in background
- expect(languageMode.tokenizedLines[5].tokens[0].scopes).toEqual([
- 'source.js',
- 'comment.block.js'
- ]);
- expect(languageMode.tokenizedLines[6].tokens[0].scopes).toEqual([
- 'source.js',
- 'comment.block.js'
- ]);
- expect(languageMode.tokenizedLines[7].tokens[0].scopes).toEqual([
- 'source.js',
- 'comment.block.js'
- ]);
- expect(languageMode.tokenizedLines[8].tokens[0].scopes).not.toBe([
- 'source.js',
- 'comment.block.js'
- ]);
- });
- });
- });
- describe('when there is an insertion that is larger than the chunk size', () => {
- it('tokenizes the initial chunk synchronously, then tokenizes the remaining lines in the background', () => {
- const commentBlock = _.multiplyString(
- '// a comment\n',
- languageMode.chunkSize + 2
- );
- buffer.insert([0, 0], commentBlock);
- expect(
- languageMode.tokenizedLines[0].ruleStack != null
- ).toBeTruthy();
- expect(
- languageMode.tokenizedLines[4].ruleStack != null
- ).toBeTruthy();
- expect(languageMode.tokenizedLines[5]).toBeUndefined();
- advanceClock();
- expect(
- languageMode.tokenizedLines[5].ruleStack != null
- ).toBeTruthy();
- expect(
- languageMode.tokenizedLines[6].ruleStack != null
- ).toBeTruthy();
- });
- });
- });
- });
- describe('when the buffer contains hard-tabs', () => {
- beforeEach(async () => {
- atom.packages.activatePackage('language-coffee-script');
- buffer = atom.project.bufferForPathSync('sample-with-tabs.coffee');
- languageMode = new TextMateLanguageMode({
- buffer,
- config,
- grammar: atom.grammars.grammarForScopeName('source.coffee')
- });
- languageMode.startTokenizing();
- });
- afterEach(() => {
- languageMode.destroy();
- buffer.release();
- });
- describe('when the buffer is fully tokenized', () => {
- beforeEach(() => fullyTokenize(languageMode));
- });
- });
- describe('when tokenization completes', () => {
- it('emits the `tokenized` event', async () => {
- const editor = await atom.workspace.open('sample.js');
- const tokenizedHandler = jasmine.createSpy('tokenized handler');
- editor.languageMode.onDidTokenize(tokenizedHandler);
- fullyTokenize(editor.getBuffer().getLanguageMode());
- expect(tokenizedHandler.callCount).toBe(1);
- });
- it("doesn't re-emit the `tokenized` event when it is re-tokenized", async () => {
- const editor = await atom.workspace.open('sample.js');
- fullyTokenize(editor.languageMode);
- const tokenizedHandler = jasmine.createSpy('tokenized handler');
- editor.languageMode.onDidTokenize(tokenizedHandler);
- editor.getBuffer().insert([0, 0], "'");
- fullyTokenize(editor.languageMode);
- expect(tokenizedHandler).not.toHaveBeenCalled();
- });
- });
- describe('when the grammar is updated because a grammar it includes is activated', async () => {
- it('re-emits the `tokenized` event', async () => {
- let tokenizationCount = 0;
- const editor = await atom.workspace.open('coffee.coffee');
- editor.onDidTokenize(() => {
- tokenizationCount++;
- });
- fullyTokenize(editor.getBuffer().getLanguageMode());
- tokenizationCount = 0;
- await atom.packages.activatePackage('language-coffee-script');
- fullyTokenize(editor.getBuffer().getLanguageMode());
- expect(tokenizationCount).toBe(1);
- });
- it('retokenizes the buffer', async () => {
- await atom.packages.activatePackage('language-ruby-on-rails');
- await atom.packages.activatePackage('language-ruby');
- buffer = atom.project.bufferForPathSync();
- buffer.setText("<div class='name'><%= User.find(2).full_name %></div>");
- languageMode = new TextMateLanguageMode({
- buffer,
- config,
- grammar: atom.grammars.selectGrammar('test.erb')
- });
- fullyTokenize(languageMode);
- expect(languageMode.tokenizedLines[0].tokens[0]).toEqual({
- value: "<div class='name'>",
- scopes: ['text.html.ruby']
- });
- await atom.packages.activatePackage('language-html');
- fullyTokenize(languageMode);
- expect(languageMode.tokenizedLines[0].tokens[0]).toEqual({
- value: '<',
- scopes: [
- 'text.html.ruby',
- 'meta.tag.block.div.html',
- 'punctuation.definition.tag.begin.html'
- ]
- });
- });
- });
- describe('when the buffer is configured with the null grammar', () => {
- it('does not actually tokenize using the grammar', () => {
- spyOn(NullGrammar, 'tokenizeLine').andCallThrough();
- buffer = atom.project.bufferForPathSync(
- 'sample.will-use-the-null-grammar'
- );
- buffer.setText('a\nb\nc');
- languageMode = new TextMateLanguageMode({ buffer, config });
- const tokenizeCallback = jasmine.createSpy('onDidTokenize');
- languageMode.onDidTokenize(tokenizeCallback);
- expect(languageMode.tokenizedLines[0]).toBeUndefined();
- expect(languageMode.tokenizedLines[1]).toBeUndefined();
- expect(languageMode.tokenizedLines[2]).toBeUndefined();
- expect(tokenizeCallback.callCount).toBe(0);
- expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled();
- fullyTokenize(languageMode);
- expect(languageMode.tokenizedLines[0]).toBeUndefined();
- expect(languageMode.tokenizedLines[1]).toBeUndefined();
- expect(languageMode.tokenizedLines[2]).toBeUndefined();
- expect(tokenizeCallback.callCount).toBe(0);
- expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled();
- });
- });
- });
- describe('.tokenForPosition(position)', () => {
- afterEach(() => {
- languageMode.destroy();
- buffer.release();
- });
- it('returns the correct token (regression)', () => {
- buffer = atom.project.bufferForPathSync('sample.js');
- languageMode = new TextMateLanguageMode({
- buffer,
- config,
- grammar: atom.grammars.grammarForScopeName('source.js')
- });
- fullyTokenize(languageMode);
- expect(languageMode.tokenForPosition([1, 0]).scopes).toEqual([
- 'source.js'
- ]);
- expect(languageMode.tokenForPosition([1, 1]).scopes).toEqual([
- 'source.js'
- ]);
- expect(languageMode.tokenForPosition([1, 2]).scopes).toEqual([
- 'source.js',
- 'storage.type.var.js'
- ]);
- });
- });
- describe('.bufferRangeForScopeAtPosition(selector, position)', () => {
- beforeEach(() => {
- buffer = atom.project.bufferForPathSync('sample.js');
- languageMode = new TextMateLanguageMode({
- buffer,
- config,
- grammar: atom.grammars.grammarForScopeName('source.js')
- });
- fullyTokenize(languageMode);
- });
- describe('when the selector does not match the token at the position', () =>
- it('returns a falsy value', () =>
- expect(
- languageMode.bufferRangeForScopeAtPosition('.bogus', [0, 1])
- ).toBeUndefined()));
- describe('when the selector matches a single token at the position', () => {
- it('returns the range covered by the token', () => {
- expect(
- languageMode.bufferRangeForScopeAtPosition('.storage.type.var.js', [
- 0,
- 1
- ])
- ).toEqual([[0, 0], [0, 3]]);
- expect(
- languageMode.bufferRangeForScopeAtPosition('.storage.type.var.js', [
- 0,
- 3
- ])
- ).toEqual([[0, 0], [0, 3]]);
- });
- });
- describe('when the selector matches a run of multiple tokens at the position', () => {
- it('returns the range covered by all contiguous tokens (within a single line)', () => {
- expect(
- languageMode.bufferRangeForScopeAtPosition('.function', [1, 18])
- ).toEqual([[1, 6], [1, 28]]);
- });
- });
- });
- describe('.tokenizedLineForRow(row)', () => {
- it("returns the tokenized line for a row, or a placeholder line if it hasn't been tokenized yet", () => {
- buffer = atom.project.bufferForPathSync('sample.js');
- const grammar = atom.grammars.grammarForScopeName('source.js');
- languageMode = new TextMateLanguageMode({ buffer, config, grammar });
- const line0 = buffer.lineForRow(0);
- const jsScopeStartId = grammar.startIdForScope(grammar.scopeName);
- const jsScopeEndId = grammar.endIdForScope(grammar.scopeName);
- languageMode.startTokenizing();
- expect(languageMode.tokenizedLines[0]).toBeUndefined();
- expect(languageMode.tokenizedLineForRow(0).text).toBe(line0);
- expect(languageMode.tokenizedLineForRow(0).tags).toEqual([
- jsScopeStartId,
- line0.length,
- jsScopeEndId
- ]);
- advanceClock(1);
- expect(languageMode.tokenizedLines[0]).not.toBeUndefined();
- expect(languageMode.tokenizedLineForRow(0).text).toBe(line0);
- expect(languageMode.tokenizedLineForRow(0).tags).not.toEqual([
- jsScopeStartId,
- line0.length,
- jsScopeEndId
- ]);
- });
- it('returns undefined if the requested row is outside the buffer range', () => {
- buffer = atom.project.bufferForPathSync('sample.js');
- const grammar = atom.grammars.grammarForScopeName('source.js');
- languageMode = new TextMateLanguageMode({ buffer, config, grammar });
- fullyTokenize(languageMode);
- expect(languageMode.tokenizedLineForRow(999)).toBeUndefined();
- });
- });
- describe('.buildHighlightIterator', () => {
- const { TextMateHighlightIterator } = TextMateLanguageMode;
- it('iterates over the syntactic scope boundaries', () => {
- buffer = new TextBuffer({ text: 'var foo = 1 /*\nhello*/var bar = 2\n' });
- languageMode = new TextMateLanguageMode({
- buffer,
- config,
- grammar: atom.grammars.grammarForScopeName('source.js')
- });
- fullyTokenize(languageMode);
- const iterator = languageMode.buildHighlightIterator();
- iterator.seek(Point(0, 0));
- const expectedBoundaries = [
- {
- position: Point(0, 0),
- closeTags: [],
- openTags: [
- 'syntax--source syntax--js',
- 'syntax--storage syntax--type syntax--var syntax--js'
- ]
- },
- {
- position: Point(0, 3),
- closeTags: ['syntax--storage syntax--type syntax--var syntax--js'],
- openTags: []
- },
- {
- position: Point(0, 8),
- closeTags: [],
- openTags: [
- 'syntax--keyword syntax--operator syntax--assignment syntax--js'
- ]
- },
- {
- position: Point(0, 9),
- closeTags: [
- 'syntax--keyword syntax--operator syntax--assignment syntax--js'
- ],
- openTags: []
- },
- {
- position: Point(0, 10),
- closeTags: [],
- openTags: [
- 'syntax--constant syntax--numeric syntax--decimal syntax--js'
- ]
- },
- {
- position: Point(0, 11),
- closeTags: [
- 'syntax--constant syntax--numeric syntax--decimal syntax--js'
- ],
- openTags: []
- },
- {
- position: Point(0, 12),
- closeTags: [],
- openTags: [
- 'syntax--comment syntax--block syntax--js',
- 'syntax--punctuation syntax--definition syntax--comment syntax--begin syntax--js'
- ]
- },
- {
- position: Point(0, 14),
- closeTags: [
- 'syntax--punctuation syntax--definition syntax--comment syntax--begin syntax--js'
- ],
- openTags: []
- },
- {
- position: Point(1, 5),
- closeTags: [],
- openTags: [
- 'syntax--punctuation syntax--definition syntax--comment syntax--end syntax--js'
- ]
- },
- {
- position: Point(1, 7),
- closeTags: [
- 'syntax--punctuation syntax--definition syntax--comment syntax--end syntax--js',
- 'syntax--comment syntax--block syntax--js'
- ],
- openTags: ['syntax--storage syntax--type syntax--var syntax--js']
- },
- {
- position: Point(1, 10),
- closeTags: ['syntax--storage syntax--type syntax--var syntax--js'],
- openTags: []
- },
- {
- position: Point(1, 15),
- closeTags: [],
- openTags: [
- 'syntax--keyword syntax--operator syntax--assignment syntax--js'
- ]
- },
- {
- position: Point(1, 16),
- closeTags: [
- 'syntax--keyword syntax--operator syntax--assignment syntax--js'
- ],
- openTags: []
- },
- {
- position: Point(1, 17),
- closeTags: [],
- openTags: [
- 'syntax--constant syntax--numeric syntax--decimal syntax--js'
- ]
- },
- {
- position: Point(1, 18),
- closeTags: [
- 'syntax--constant syntax--numeric syntax--decimal syntax--js'
- ],
- openTags: []
- }
- ];
- while (true) {
- const boundary = {
- position: iterator.getPosition(),
- closeTags: iterator
- .getCloseScopeIds()
- .map(scopeId => languageMode.classNameForScopeId(scopeId)),
- openTags: iterator
- .getOpenScopeIds()
- .map(scopeId => languageMode.classNameForScopeId(scopeId))
- };
- expect(boundary).toEqual(expectedBoundaries.shift());
- if (!iterator.moveToSuccessor()) {
- break;
- }
- }
- expect(
- iterator
- .seek(Point(0, 1))
- .map(scopeId => languageMode.classNameForScopeId(scopeId))
- ).toEqual([
- 'syntax--source syntax--js',
- 'syntax--storage syntax--type syntax--var syntax--js'
- ]);
- expect(iterator.getPosition()).toEqual(Point(0, 3));
- expect(
- iterator
- .seek(Point(0, 8))
- .map(scopeId => languageMode.classNameForScopeId(scopeId))
- ).toEqual(['syntax--source syntax--js']);
- expect(iterator.getPosition()).toEqual(Point(0, 8));
- expect(
- iterator
- .seek(Point(1, 0))
- .map(scopeId => languageMode.classNameForScopeId(scopeId))
- ).toEqual([
- 'syntax--source syntax--js',
- 'syntax--comment syntax--block syntax--js'
- ]);
- expect(iterator.getPosition()).toEqual(Point(1, 0));
- expect(
- iterator
- .seek(Point(1, 18))
- .map(scopeId => languageMode.classNameForScopeId(scopeId))
- ).toEqual([
- 'syntax--source syntax--js',
- 'syntax--constant syntax--numeric syntax--decimal syntax--js'
- ]);
- expect(iterator.getPosition()).toEqual(Point(1, 18));
- expect(
- iterator
- .seek(Point(2, 0))
- .map(scopeId => languageMode.classNameForScopeId(scopeId))
- ).toEqual(['syntax--source syntax--js']);
- iterator.moveToSuccessor();
- }); // ensure we don't infinitely loop (regression test)
- it('does not report columns beyond the length of the line', async () => {
- await atom.packages.activatePackage('language-coffee-script');
- buffer = new TextBuffer({ text: '# hello\n# world' });
- languageMode = new TextMateLanguageMode({
- buffer,
- config,
- grammar: atom.grammars.grammarForScopeName('source.coffee')
- });
- fullyTokenize(languageMode);
- const iterator = languageMode.buildHighlightIterator();
- iterator.seek(Point(0, 0));
- iterator.moveToSuccessor();
- iterator.moveToSuccessor();
- expect(iterator.getPosition().column).toBe(7);
- iterator.moveToSuccessor();
- expect(iterator.getPosition().column).toBe(0);
- iterator.seek(Point(0, 7));
- expect(iterator.getPosition().column).toBe(7);
- iterator.seek(Point(0, 8));
- expect(iterator.getPosition().column).toBe(7);
- });
- it('correctly terminates scopes at the beginning of the line (regression)', () => {
- const grammar = atom.grammars.createGrammar('test', {
- scopeName: 'text.broken',
- name: 'Broken grammar',
- patterns: [
- { begin: 'start', end: '(?=end)', name: 'blue.broken' },
- { match: '.', name: 'yellow.broken' }
- ]
- });
- buffer = new TextBuffer({ text: 'start x\nend x\nx' });
- languageMode = new TextMateLanguageMode({ buffer, config, grammar });
- fullyTokenize(languageMode);
- const iterator = languageMode.buildHighlightIterator();
- iterator.seek(Point(1, 0));
- expect(iterator.getPosition()).toEqual([1, 0]);
- expect(
- iterator
- .getCloseScopeIds()
- .map(scopeId => languageMode.classNameForScopeId(scopeId))
- ).toEqual(['syntax--blue syntax--broken']);
- expect(
- iterator
- .getOpenScopeIds()
- .map(scopeId => languageMode.classNameForScopeId(scopeId))
- ).toEqual(['syntax--yellow syntax--broken']);
- });
describe('TextMateHighlightIterator.seek(position)', function() {
  it('seeks to the leftmost tag boundary greater than or equal to the given position and returns the containing tags', function() {
    // Stub language mode exposing a single tokenized line for row 0.
    // In the `tags` array, negative odd numbers open a scope and the
    // following negative even number closes it (-1/-2, -3/-4, -5/-6);
    // non-negative numbers are text lengths that advance the column through
    // 'foo bar'. The iterator reports scopes by id (257, 259, 261 below) —
    // presumably |tag| + 256; verify against TextMateHighlightIterator.
    const languageMode = {
      tokenizedLineForRow(row) {
        if (row === 0) {
          return {
            tags: [-1, -2, -3, -4, -5, 3, -3, -4, -6, -5, 4, -6, -3, -4],
            text: 'foo bar',
            openScopes: []
          };
        } else {
          return null;
        }
      }
    };
    const iterator = new TextMateHighlightIterator(languageMode);

    // Seeking to the very start: no containing tags; the first boundary
    // opens scope 257.
    expect(iterator.seek(Point(0, 0))).toEqual([]);
    expect(iterator.getPosition()).toEqual(Point(0, 0));
    expect(iterator.getCloseScopeIds()).toEqual([]);
    expect(iterator.getOpenScopeIds()).toEqual([257]);
    iterator.moveToSuccessor();
    expect(iterator.getCloseScopeIds()).toEqual([257]);
    expect(iterator.getOpenScopeIds()).toEqual([259]);

    // Seeking into the middle of the 'foo' token lands on the next boundary
    // to the right (column 3) and returns 261 as a containing tag.
    expect(iterator.seek(Point(0, 1))).toEqual([261]);
    expect(iterator.getPosition()).toEqual(Point(0, 3));
    expect(iterator.getCloseScopeIds()).toEqual([]);
    expect(iterator.getOpenScopeIds()).toEqual([259]);
    iterator.moveToSuccessor();
    expect(iterator.getPosition()).toEqual(Point(0, 3));
    expect(iterator.getCloseScopeIds()).toEqual([259, 261]);
    expect(iterator.getOpenScopeIds()).toEqual([261]);

    // Seeking exactly onto a boundary yields the same state as arriving
    // there from the left.
    expect(iterator.seek(Point(0, 3))).toEqual([261]);
    expect(iterator.getPosition()).toEqual(Point(0, 3));
    expect(iterator.getCloseScopeIds()).toEqual([]);
    expect(iterator.getOpenScopeIds()).toEqual([259]);
    iterator.moveToSuccessor();
    expect(iterator.getPosition()).toEqual(Point(0, 3));
    expect(iterator.getCloseScopeIds()).toEqual([259, 261]);
    expect(iterator.getOpenScopeIds()).toEqual([261]);
    iterator.moveToSuccessor();
    expect(iterator.getPosition()).toEqual(Point(0, 7));
    expect(iterator.getCloseScopeIds()).toEqual([261]);
    expect(iterator.getOpenScopeIds()).toEqual([259]);
    iterator.moveToSuccessor();
    expect(iterator.getPosition()).toEqual(Point(0, 7));
    expect(iterator.getCloseScopeIds()).toEqual([259]);
    expect(iterator.getOpenScopeIds()).toEqual([]);

    // Past the last boundary of the only line: advances to the start of the
    // next row with no scope changes.
    iterator.moveToSuccessor();
    expect(iterator.getPosition()).toEqual(Point(1, 0));
    expect(iterator.getCloseScopeIds()).toEqual([]);
    expect(iterator.getOpenScopeIds()).toEqual([]);

    // Re-seeking backward into the ' bar' token still works after the
    // iterator has been fully advanced.
    expect(iterator.seek(Point(0, 5))).toEqual([261]);
    expect(iterator.getPosition()).toEqual(Point(0, 7));
    expect(iterator.getCloseScopeIds()).toEqual([261]);
    expect(iterator.getOpenScopeIds()).toEqual([259]);
    iterator.moveToSuccessor();
    expect(iterator.getPosition()).toEqual(Point(0, 7));
    expect(iterator.getCloseScopeIds()).toEqual([259]);
    expect(iterator.getOpenScopeIds()).toEqual([]);
  });
});
describe('TextMateHighlightIterator.moveToSuccessor()', function() {
  it('reports two boundaries at the same position when tags close, open, then close again without a non-negative integer separating them (regression)', () => {
    // Open/close/open/close with no text in between: the iterator must emit
    // two distinct boundaries at the same point instead of merging them.
    const stubLanguageMode = {
      tokenizedLineForRow() {
        return { tags: [-1, -2, -1, -2], text: '', openScopes: [] };
      }
    };

    const highlightIterator = new TextMateHighlightIterator(stubLanguageMode);
    highlightIterator.seek(Point(0, 0));
    expect(highlightIterator.getPosition()).toEqual(Point(0, 0));
    expect(highlightIterator.getCloseScopeIds()).toEqual([]);
    expect(highlightIterator.getOpenScopeIds()).toEqual([257]);

    highlightIterator.moveToSuccessor();
    expect(highlightIterator.getPosition()).toEqual(Point(0, 0));
    expect(highlightIterator.getCloseScopeIds()).toEqual([257]);
    expect(highlightIterator.getOpenScopeIds()).toEqual([257]);

    highlightIterator.moveToSuccessor();
    expect(highlightIterator.getCloseScopeIds()).toEqual([257]);
    expect(highlightIterator.getOpenScopeIds()).toEqual([]);
  });
});
- });
describe('.suggestedIndentForBufferRow', () => {
  let editor;

  describe('javascript', () => {
    beforeEach(async () => {
      editor = await atom.workspace.open('sample.js', { autoIndent: false });
      await atom.packages.activatePackage('language-javascript');
    });

    // Shared expectations: indent levels for sample.js derived from the
    // previous non-blank line.
    const checkSampleJsIndentation = () => {
      expect(editor.suggestedIndentForBufferRow(0)).toBe(0);
      expect(editor.suggestedIndentForBufferRow(1)).toBe(1);
      expect(editor.suggestedIndentForBufferRow(2)).toBe(2);
      expect(editor.suggestedIndentForBufferRow(5)).toBe(3);
      expect(editor.suggestedIndentForBufferRow(7)).toBe(2);
      expect(editor.suggestedIndentForBufferRow(9)).toBe(1);
      expect(editor.suggestedIndentForBufferRow(11)).toBe(1);
    };

    it('bases indentation off of the previous non-blank line', () => {
      checkSampleJsIndentation();
    });

    it('does not take invisibles into account', () => {
      // Rendering invisibles must not change the suggested indentation.
      editor.update({ showInvisibles: true });
      checkSampleJsIndentation();
    });
  });

  describe('css', () => {
    beforeEach(async () => {
      editor = await atom.workspace.open('css.css', { autoIndent: true });
      await atom.packages.activatePackage('language-source');
      await atom.packages.activatePackage('language-css');
    });

    it('does not return negative values (regression)', () => {
      editor.setText('.test {\npadding: 0;\n}');
      expect(editor.suggestedIndentForBufferRow(2)).toBe(0);
    });
  });
});
describe('.isFoldableAtRow(row)', () => {
  let editor;

  beforeEach(() => {
    // sample.js with a three-line comment block inserted mid-file (at row 10)
    // and another comment block at the very top (rows 0-2).
    buffer = atom.project.bufferForPathSync('sample.js');
    buffer.insert([10, 0], ' // multi-line\n // comment\n // block\n');
    buffer.insert([0, 0], '// multi-line\n// comment\n// block\n');
    languageMode = new TextMateLanguageMode({
      buffer,
      config,
      grammar: atom.grammars.grammarForScopeName('source.js')
    });
    buffer.setLanguageMode(languageMode);
    fullyTokenize(languageMode);
  });

  it('includes the first line of multi-line comments', () => {
    expect(languageMode.isFoldableAtRow(0)).toBe(true);
    expect(languageMode.isFoldableAtRow(1)).toBe(false);
    expect(languageMode.isFoldableAtRow(2)).toBe(false);
    expect(languageMode.isFoldableAtRow(3)).toBe(true); // because of indent
    expect(languageMode.isFoldableAtRow(13)).toBe(true);
    expect(languageMode.isFoldableAtRow(14)).toBe(false);
    expect(languageMode.isFoldableAtRow(15)).toBe(false);
    expect(languageMode.isFoldableAtRow(16)).toBe(false);

    // Inserting a blank line after row 0 splits it off from the comment
    // block, so the fold now starts at row 2 instead.
    buffer.insert([0, Infinity], '\n');
    expect(languageMode.isFoldableAtRow(0)).toBe(false);
    expect(languageMode.isFoldableAtRow(1)).toBe(false);
    expect(languageMode.isFoldableAtRow(2)).toBe(true);
    expect(languageMode.isFoldableAtRow(3)).toBe(false);

    // Undoing restores the original foldability.
    buffer.undo();
    expect(languageMode.isFoldableAtRow(0)).toBe(true);
    expect(languageMode.isFoldableAtRow(1)).toBe(false);
    expect(languageMode.isFoldableAtRow(2)).toBe(false);
    expect(languageMode.isFoldableAtRow(3)).toBe(true); // because of indent
  });

  it('includes non-comment lines that precede an increase in indentation', () => {
    // commented lines preceding an indent aren't foldable
    buffer.insert([2, 0], ' ');
    expect(languageMode.isFoldableAtRow(1)).toBe(false);
    expect(languageMode.isFoldableAtRow(2)).toBe(false);
    expect(languageMode.isFoldableAtRow(3)).toBe(true);
    expect(languageMode.isFoldableAtRow(4)).toBe(true);
    expect(languageMode.isFoldableAtRow(5)).toBe(false);
    expect(languageMode.isFoldableAtRow(6)).toBe(false);
    expect(languageMode.isFoldableAtRow(7)).toBe(true);
    expect(languageMode.isFoldableAtRow(8)).toBe(false);

    // Indenting row 7 moves the fold start up to row 6.
    buffer.insert([7, 0], ' ');
    expect(languageMode.isFoldableAtRow(6)).toBe(true);
    expect(languageMode.isFoldableAtRow(7)).toBe(false);
    expect(languageMode.isFoldableAtRow(8)).toBe(false);

    buffer.undo();
    expect(languageMode.isFoldableAtRow(6)).toBe(false);
    expect(languageMode.isFoldableAtRow(7)).toBe(true);
    expect(languageMode.isFoldableAtRow(8)).toBe(false);

    // A whitespace-only line followed by an indented line still makes the
    // preceding row foldable.
    buffer.insert([7, 0], ' \n x\n');
    expect(languageMode.isFoldableAtRow(6)).toBe(true);
    expect(languageMode.isFoldableAtRow(7)).toBe(false);
    expect(languageMode.isFoldableAtRow(8)).toBe(false);

    buffer.insert([9, 0], ' ');
    expect(languageMode.isFoldableAtRow(6)).toBe(true);
    expect(languageMode.isFoldableAtRow(7)).toBe(false);
    expect(languageMode.isFoldableAtRow(8)).toBe(false);
  });

  it('returns true if the line starts a multi-line comment', async () => {
    editor = await atom.workspace.open('sample-with-comments.js');
    fullyTokenize(editor.getBuffer().getLanguageMode());
    expect(editor.isFoldableAtBufferRow(1)).toBe(true);
    expect(editor.isFoldableAtBufferRow(6)).toBe(true);
    expect(editor.isFoldableAtBufferRow(8)).toBe(false);
    expect(editor.isFoldableAtBufferRow(11)).toBe(true);
    expect(editor.isFoldableAtBufferRow(15)).toBe(false);
    expect(editor.isFoldableAtBufferRow(17)).toBe(true);
    expect(editor.isFoldableAtBufferRow(21)).toBe(true);
    expect(editor.isFoldableAtBufferRow(24)).toBe(true);
    expect(editor.isFoldableAtBufferRow(28)).toBe(false);
  });

  it('returns true for lines that end with a comment and are followed by an indented line', async () => {
    editor = await atom.workspace.open('sample-with-comments.js');
    expect(editor.isFoldableAtBufferRow(5)).toBe(true);
  });

  it("does not return true for a line in the middle of a comment that's followed by an indented line", async () => {
    editor = await atom.workspace.open('sample-with-comments.js');
    fullyTokenize(editor.getBuffer().getLanguageMode());
    expect(editor.isFoldableAtBufferRow(7)).toBe(false);
    // Even after indenting the following comment line.
    editor.buffer.insert([8, 0], ' ');
    expect(editor.isFoldableAtBufferRow(7)).toBe(false);
  });
});
describe('.getFoldableRangesAtIndentLevel', () => {
  let editor;

  it('returns the ranges that can be folded at the given indent level', () => {
    // Nested blocks at indent levels 0, 1, and 2 (tab length 2).
    buffer = new TextBuffer(dedent`
      if (a) {
        b();
        if (c) {
          d()
          if (e) {
            f()
          }
          g()
        }
        h()
      }
      i()
      if (j) {
        k()
      }
    `);
    languageMode = new TextMateLanguageMode({ buffer, config });

    const foldedTextAtLevel = level =>
      simulateFold(languageMode.getFoldableRangesAtIndentLevel(level, 2));

    // Level 0: the two top-level `if` bodies collapse.
    expect(foldedTextAtLevel(0)).toBe(dedent`
      if (a) {⋯
      }
      i()
      if (j) {⋯
      }
    `);

    // Level 1: only the `if (c)` body collapses.
    expect(foldedTextAtLevel(1)).toBe(dedent`
      if (a) {
        b();
        if (c) {⋯
        }
        h()
      }
      i()
      if (j) {
        k()
      }
    `);

    // Level 2: only the innermost `if (e)` body collapses.
    expect(foldedTextAtLevel(2)).toBe(dedent`
      if (a) {
        b();
        if (c) {
          d()
          if (e) {⋯
          }
          g()
        }
        h()
      }
      i()
      if (j) {
        k()
      }
    `);
  });

  it('folds every foldable range at a given indentLevel', async () => {
    editor = await atom.workspace.open('sample-with-comments.js');
    fullyTokenize(editor.getBuffer().getLanguageMode());
    editor.foldAllAtIndentLevel(2);
    const folds = editor.unfoldAll();
    expect(folds.length).toBe(5);
    const foldedRowRanges = folds.map(fold => [fold.start.row, fold.end.row]);
    expect(foldedRowRanges).toEqual([
      [6, 8],
      [11, 16],
      [17, 20],
      [21, 22],
      [24, 25]
    ]);
  });
});
describe('.getFoldableRanges', () => {
  it('returns the ranges that can be folded', () => {
    buffer = new TextBuffer(dedent`
      if (a) {
        b();
        if (c) {
          d()
          if (e) {
            f()
          }
          g()
        }
        h()
      }
      i()
      if (j) {
        k()
      }
    `);
    languageMode = new TextMateLanguageMode({ buffer, config });

    // getFoldableRanges should equal the union of the per-level queries,
    // ordered by start row then end row.
    const rangesFromAllLevels = [
      ...languageMode.getFoldableRangesAtIndentLevel(0, 2),
      ...languageMode.getFoldableRangesAtIndentLevel(1, 2),
      ...languageMode.getFoldableRangesAtIndentLevel(2, 2)
    ];
    const expected = rangesFromAllLevels
      .sort((a, b) => a.start.row - b.start.row || a.end.row - b.end.row)
      .map(range => range.toString());

    expect(languageMode.getFoldableRanges(2).map(r => r.toString())).toEqual(
      expected
    );
  });

  it('works with multi-line comments', async () => {
    await atom.packages.activatePackage('language-javascript');
    const editor = await atom.workspace.open('sample-with-comments.js', {
      autoIndent: false
    });
    fullyTokenize(editor.getBuffer().getLanguageMode());

    editor.foldAll();
    const folds = editor.unfoldAll();
    expect(folds.length).toBe(8);
    const foldedRowRanges = folds.map(fold => [fold.start.row, fold.end.row]);
    expect(foldedRowRanges).toEqual([
      [0, 30],
      [1, 4],
      [5, 27],
      [6, 8],
      [11, 16],
      [17, 20],
      [21, 22],
      [24, 25]
    ]);
  });
});
describe('.getFoldableRangeContainingPoint', () => {
  it('returns the range for the smallest fold that contains the given range', () => {
    buffer = new TextBuffer(dedent`
      if (a) {
        b();
        if (c) {
          d()
          if (e) {
            f()
          }
          g()
        }
        h()
      }
      i()
      if (j) {
        k()
      }
    `);
    languageMode = new TextMateLanguageMode({ buffer, config });

    // Helper: fold the smallest range containing `point` and return the text.
    const foldContaining = point =>
      simulateFold([languageMode.getFoldableRangeContainingPoint(point, 2)]);

    // A point before the fold marker on row 0 is not inside any fold.
    expect(
      languageMode.getFoldableRangeContainingPoint(Point(0, 5), 2)
    ).toBeNull();

    expect(foldContaining(Point(0, 10))).toBe(dedent`
      if (a) {⋯
      }
      i()
      if (j) {
        k()
      }
    `);

    expect(foldContaining(Point(7, 0))).toBe(dedent`
      if (a) {
        b();
        if (c) {⋯
        }
        h()
      }
      i()
      if (j) {
        k()
      }
    `);

    expect(foldContaining(Point(1, Infinity))).toBe(dedent`
      if (a) {⋯
      }
      i()
      if (j) {
        k()
      }
    `);

    expect(foldContaining(Point(2, 20))).toBe(dedent`
      if (a) {
        b();
        if (c) {⋯
        }
        h()
      }
      i()
      if (j) {
        k()
      }
    `);
  });

  it('works for coffee-script', async () => {
    const editor = await atom.workspace.open('coffee.coffee');
    await atom.packages.activatePackage('language-coffee-script');
    buffer = editor.buffer;
    languageMode = editor.languageMode;

    const rangeAtRow = row =>
      languageMode.getFoldableRangeContainingPoint(Point(row, Infinity), 2);

    expect(rangeAtRow(0)).toEqual([[0, Infinity], [20, Infinity]]);
    expect(rangeAtRow(1)).toEqual([[1, Infinity], [17, Infinity]]);
    expect(rangeAtRow(2)).toEqual([[1, Infinity], [17, Infinity]]);
    expect(rangeAtRow(19)).toEqual([[19, Infinity], [20, Infinity]]);
  });

  it('works for javascript', async () => {
    const editor = await atom.workspace.open('sample.js');
    await atom.packages.activatePackage('language-javascript');
    buffer = editor.buffer;
    languageMode = editor.languageMode;

    const rangeAtRow = row =>
      editor.languageMode.getFoldableRangeContainingPoint(
        Point(row, Infinity),
        2
      );

    expect(rangeAtRow(0)).toEqual([[0, Infinity], [12, Infinity]]);
    expect(rangeAtRow(1)).toEqual([[1, Infinity], [9, Infinity]]);
    expect(rangeAtRow(2)).toEqual([[1, Infinity], [9, Infinity]]);
    expect(rangeAtRow(4)).toEqual([[4, Infinity], [7, Infinity]]);
  });

  it('searches upward and downward for surrounding comment lines and folds them as a single fold', async () => {
    await atom.packages.activatePackage('language-javascript');
    const editor = await atom.workspace.open('sample-with-comments.js');
    editor.buffer.insert(
      [1, 0],
      ' //this is a comment\n // and\n //more docs\n\n//second comment'
    );
    fullyTokenize(editor.getBuffer().getLanguageMode());

    // Folding row 1 should cover the whole surrounding comment run (rows
    // 1-3), stopping at the blank line before the second comment.
    editor.foldBufferRow(1);
    const [fold] = editor.unfoldAll();
    expect([fold.start.row, fold.end.row]).toEqual([1, 3]);
  });
});
describe('TokenIterator', () =>
  it('correctly terminates scopes at the beginning of the line (regression)', () => {
    // `blue.broken` ends via a lookahead at column 0 of the "end x" line;
    // the token iterator for that line must not report it as still open.
    const grammar = atom.grammars.createGrammar('test', {
      scopeName: 'text.broken',
      name: 'Broken grammar',
      patterns: [
        { begin: 'start', end: '(?=end)', name: 'blue.broken' },
        { match: '.', name: 'yellow.broken' }
      ]
    });

    const buffer = new TextBuffer({
      text: dedent`
        start x
        end x
        x
      `
    });
    const languageMode = new TextMateLanguageMode({
      buffer,
      grammar,
      config: atom.config,
      grammarRegistry: atom.grammars,
      packageManager: atom.packages,
      assert: atom.assert
    });
    fullyTokenize(languageMode);

    const tokenIterator = languageMode
      .tokenizedLineForRow(1)
      .getTokenIterator();
    tokenIterator.next();

    expect(tokenIterator.getBufferStart()).toBe(0);
    expect(tokenIterator.getScopeEnds()).toEqual([]);
    expect(tokenIterator.getScopeStarts()).toEqual([
      'text.broken',
      'yellow.broken'
    ]);
  }));
// Simulates folding each of `ranges` by replacing it with a "⋯" placeholder
// in the spec-level `buffer`, capturing the resulting text, then undoing the
// edit so the buffer is left unchanged for subsequent assertions.
//
// Ranges are applied last-to-first so earlier replacements don't shift the
// coordinates of the ranges that follow.
//
// @param {Array} ranges - buffer ranges to collapse (not mutated)
// @returns {string} the buffer text with each range replaced by "⋯"
function simulateFold(ranges) {
  // Copy before reversing: Array#reverse mutates in place, and callers pass
  // arrays they may still own (the original implementation clobbered them).
  const rangesInReverse = [...ranges].reverse();
  buffer.transact(() => {
    for (const range of rangesInReverse) {
      buffer.setTextInRange(range, '⋯');
    }
  });
  const foldedText = buffer.getText();
  buffer.undo();
  return foldedText;
}
// Drives the language mode's asynchronous tokenization to completion by
// repeatedly advancing the mocked spec clock until no invalid rows remain.
function fullyTokenize(languageMode) {
  languageMode.startTokenizing();
  for (;;) {
    if (languageMode.firstInvalidRow() == null) break;
    advanceClock();
  }
}
- });
|