diff --git a/grammars/html.cson b/grammars/html.cson
index 62c9612..6ccba13 100644
--- a/grammars/html.cson
+++ b/grammars/html.cson
@@ -350,12 +350,16 @@
'entities':
'patterns': [
{
- 'captures':
+ 'begin': '(&)([a-zA-Z0-9]+|#[0-9]+|#x[0-9a-fA-F]+)'
+ 'beginCaptures':
'1':
- 'name': 'punctuation.definition.entity.html'
- '3':
- 'name': 'punctuation.definition.entity.html'
- 'match': '(&)([a-zA-Z0-9]+|#[0-9]+|#x[0-9a-fA-F]+)(;)'
+ 'name': 'punctuation.definition.entity.begin.html'
+ '2':
+ 'name': 'entity.name.entity.other.html'
+ 'end': ';'
+ 'endCaptures':
+ '0':
+ 'name': 'punctuation.definition.entity.end.html'
'name': 'constant.character.entity.html'
}
{
diff --git a/spec/html-spec.coffee b/spec/html-spec.coffee
index 1d0f757..f209933 100644
--- a/spec/html-spec.coffee
+++ b/spec/html-spec.coffee
@@ -100,2 +100,11 @@ describe 'HTML grammar', ->
grammarTest path.join(__dirname, 'fixtures/syntax_test_html.html')
grammarTest path.join(__dirname, 'fixtures/syntax_test_html_template_fragments.html')
+
+ describe "entities", ->
+ it "tokenizes & and characters after it", ->
+      {tokens} = grammar.tokenizeLine '& &amp; &a'
+
+ expect(tokens[0]).toEqual value: '&', scopes: ['text.html.basic', 'invalid.illegal.bad-ampersand.html']
+ expect(tokens[3]).toEqual value: 'amp', scopes: ['text.html.basic', 'constant.character.entity.html', 'entity.name.entity.other.html']
+ expect(tokens[4]).toEqual value: ';', scopes: ['text.html.basic', 'constant.character.entity.html', 'punctuation.definition.entity.end.html']
+ expect(tokens[7]).toEqual value: 'a', scopes: ['text.html.basic', 'constant.character.entity.html', 'entity.name.entity.other.html']