Last active
May 21, 2022 05:01
-
-
Save kbjr/43a2ea78387b40c639588c430fc19ba2 to your computer and use it in GitHub Desktop.
Marked.js Extensions
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import { marked } from 'marked'; | |
// Block-level token for an entire description list (rendered as <dl>)
export interface DescriptionListToken extends marked.Tokens.Generic {
	// Terms and details, in document order
	items: (DescriptionTermToken | DescriptionDetailToken)[];
}

// A single term entry (":#" lines) — rendered as <dt>
export interface DescriptionTermToken extends marked.Tokens.Generic {
	text: string;
}

// A single detail entry (":-" lines) — rendered as <dd>
export interface DescriptionDetailToken extends marked.Tokens.Generic {
	text: string;
}
export const description_list_ext: marked.TokenizerExtension & marked.RendererExtension = { | |
name: 'description_list', | |
level: 'block', | |
start: (src) => src.match(/:[:#-]/)?.index, | |
tokenizer(src, tokens) { | |
const rule = /^(?::[:#-](?:\s[^\n]*)?(?:\n|$))+/; | |
const match = rule.exec(src); | |
if (match) { | |
const token: DescriptionListToken = { | |
type: 'description_list', | |
raw: match[0], | |
items: [ ] | |
}; | |
const items = token.raw.trim().split('\n'); | |
const raw_buffer: string[] = [ ]; | |
const text_buffer: string[] = [ ]; | |
const flush_buffer = () => { | |
if (! raw_buffer.length) { | |
return; | |
} | |
// Grab the second character from the first line to determine the | |
// token type (should be "#" or "-") | |
const type = raw_buffer[0][1] === '#' ? 'description_term' : 'description_detail'; | |
const sub_token: (DescriptionTermToken | DescriptionDetailToken) = { | |
type, | |
raw: raw_buffer.join('\n'), | |
text: text_buffer.join('\n'), | |
tokens: [ ], | |
}; | |
raw_buffer.length = 0; | |
text_buffer.length = 0; | |
this.lexer.blockTokens(sub_token.text, sub_token.tokens); | |
token.items.push(sub_token); | |
}; | |
for (const line of items) { | |
const rule = /^:([:#-])(?:\s([^\n]*))?(?:\n|$)/; | |
const match = rule.exec(line); | |
if (match) { | |
if (match[1] !== ':') { | |
flush_buffer(); | |
} | |
raw_buffer.push(match[0]); | |
text_buffer.push(match[2]); | |
} | |
} | |
flush_buffer(); | |
return token; | |
} | |
}, | |
renderer(token: DescriptionListToken) { | |
const items = token.items.map((item) => { | |
const tag = item.type === 'description_term' ? 'dt' : 'dd'; | |
return ` | |
<${tag}> | |
${this.parser.parse(item.tokens)} | |
</${tag}> | |
`; | |
}); | |
return `<dl>${items.join('')}</dl>`; | |
} | |
}; |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import { marked } from 'marked'; | |
import { renderer } from './renderer'; | |
// Symbol key used to stash per-document footnote reference counts on
// the lexer without risk of colliding with marked's own properties
const footnotes = Symbol('footnotes');

// We're going to hang some extra data off of the lexer so we can reference it
// later when generating links back to references
declare module 'marked' {
	export interface Lexer {
		// Maps footnote id -> number of inline references seen so far
		[footnotes]: Record<number, number>;
	}
}
// Inline token for a "[^n]" footnote reference
export interface FootnoteLinkToken extends marked.Tokens.Generic {
	// The footnote number as written in the source
	id: number;
	// 1-based instance number of this particular reference to footnote `id`
	inst: number;
}
export const footnote_ref_ext: marked.TokenizerExtension & marked.RendererExtension = { | |
name: 'footnote_ref', | |
level: 'inline', | |
start: (src) => src.match(/\[\^/)?.index, | |
tokenizer(src, tokens) { | |
const rule = /^\[\^(\d+)]/; | |
const match = rule.exec(src); | |
if (match) { | |
const id = parseInt(match[1], 10); | |
return { | |
type: 'footnote_ref', | |
raw: match[0], | |
id: id, | |
inst: next_cite_inst(this.lexer, id) | |
}; | |
} | |
}, | |
renderer(token: FootnoteLinkToken) { | |
return `<sup id="cite:ref-${token.id}-${token.inst}"><a href="#cite:note-${token.id}">[${token.id}]</a></sup>`; | |
} | |
}; | |
// Block token for a run of "[n]: text" footnote definitions
export interface FootnoteListToken extends marked.Tokens.Generic {
	text: string;
	items: FootnoteToken[];
}

// A single footnote definition within a FootnoteListToken
export interface FootnoteToken extends marked.Tokens.Generic {
	id: number;
	text: string;
	// Deferred lookup: how many inline references point at this footnote.
	// A function because the count isn't final until lexing completes.
	inst_count() : number;
}
export const footnote_list_ext: marked.TokenizerExtension & marked.RendererExtension = { | |
name: 'footnote_list', | |
level: 'block', | |
start: (src) => src.match(/\[/)?.index, | |
tokenizer(src, tokens) { | |
const rule = /^(\[\d+]:[^\n]*(?:\n|$))+/; | |
const match = rule.exec(src); | |
if (match) { | |
const token: FootnoteListToken = { | |
type: 'footnote_list', | |
raw: match[0], | |
text: match[0].trim(), | |
items: null | |
}; | |
const items = token.text.split('\n'); | |
token.items = items.map((src) => { | |
const rule = /^\[(\d+)]:([^\n]*)(?:\n|$)/; | |
const match = rule.exec(src); | |
if (match) { | |
const id = parseInt(match[1], 10); | |
const token: FootnoteToken = { | |
type: 'footnote', | |
raw: match[0], | |
id: id, | |
text: match[2], | |
tokens: [ ], | |
inst_count: () => get_cite_inst_count(this.lexer, id) | |
}; | |
this.lexer.inline(token.text, token.tokens); | |
return token; | |
} | |
}); | |
return token; | |
} | |
}, | |
renderer(token: FootnoteListToken) { | |
const items = token.items.map((item) => (` | |
<li value="${item.id}" id="cite:note-${item.id}"> | |
${footnote_link_backs(item.id, item.inst_count())} | |
${this.parser.parseInline(item.tokens, renderer)} | |
</li> | |
`)); | |
return `<ol role="doc-endnotes">${items.join('')}</ol>`; | |
} | |
}; | |
function get_cite_inst_count(lexer: marked.Lexer, id: number) { | |
if (! lexer[footnotes]) { | |
lexer[footnotes] = { }; | |
} | |
return lexer[footnotes][id] || 0; | |
} | |
function next_cite_inst(lexer: marked.Lexer, id: number) { | |
if (! lexer[footnotes]) { | |
lexer[footnotes] = { }; | |
} | |
if (! lexer[footnotes][id]) { | |
lexer[footnotes][id] = 0; | |
} | |
return ++lexer[footnotes][id]; | |
} | |
const letters = 'abcdefghijklmnopqrstuvwxyz'; | |
function footnote_link_backs(id: number, count: number) { | |
if (! count) { | |
return ''; | |
} | |
if (count === 1) { | |
return `<a href="#cite:ref-${id}-1" title="Back to reference">^</a>`; | |
} | |
// NOTE: We're using letters for link backs; If we run out, only | |
// show the first 26 references | |
count = Math.min(count, 26); | |
const links: string[] = [ ]; | |
for (let i = 0; i < count; i++) { | |
const letter = letters[i]; | |
links[i] = `<a href="#cite:ref-${id}-${i + 1}" title="Back to reference ${letter}">${letter}</a>`; | |
} | |
return `^ ${links.join(' ')}`; | |
} |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import { marked } from 'marked'; | |
import katex = require('katex'); | |
import type { KatexOptions } from 'katex'; | |
// Token carrying raw TeX source for KaTeX to render
export interface KatexToken extends marked.Tokens.Generic {
	text: string;
}
export const katex_block_ext: marked.TokenizerExtension & marked.RendererExtension = { | |
name: 'katex_block', | |
level: 'block', | |
start: (src) => src.match(/\$\$/)?.index, | |
tokenizer(src, tokens) { | |
const rule = /^\$\$([\s\S]+?)\$\$/; | |
const match = rule.exec(src); | |
if (match) { | |
return { | |
type: 'katex_block', | |
raw: match[0], | |
text: match[1] | |
}; | |
} | |
}, | |
renderer(token: KatexToken) { | |
const opts: KatexOptions = { | |
displayMode: true, // true == "block" | |
}; | |
return (katex as any).renderToString(token.text, opts); | |
} | |
}; | |
export const katex_inline_ext: marked.TokenizerExtension & marked.RendererExtension = { | |
name: 'katex_inline', | |
level: 'inline', | |
start: (src) => src.match(/\$/)?.index, | |
tokenizer(src, tokens) { | |
const rule = /^\$([^\n\s](?:[^\n]+[^\n\s])?)\$/; | |
const match = rule.exec(src); | |
if (match) { | |
return { | |
type: 'katex_inline', | |
raw: match[0], | |
text: match[1] | |
}; | |
} | |
}, | |
renderer(token: KatexToken) { | |
const opts: KatexOptions = { | |
displayMode: false, // false == "inline" | |
}; | |
return (katex as any).renderToString(token.text, opts); | |
} | |
}; |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import { marked } from 'marked'; | |
import { renderer } from './renderer'; | |
// Inline token for "==highlighted==" text, rendered as <mark>
export interface MarkToken extends marked.Tokens.Generic {
	text: string;
}
export const mark_ext: marked.TokenizerExtension & marked.RendererExtension = { | |
name: 'mark', | |
level: 'inline', | |
start: (src) => src.match(/==/)?.index, | |
tokenizer(src, tokens) { | |
const rule = /^==([^\n\s](?:[^\n]+[^\n\s])?)==/; | |
const match = rule.exec(src); | |
if (match) { | |
return { | |
type: 'mark', | |
raw: match[0], | |
text: match[1], | |
tokens: this.lexer.inlineTokens(match[1], [ ]) | |
}; | |
} | |
}, | |
renderer(token: MarkToken) { | |
return `<mark>${this.parser.parseInline(token.tokens, renderer)}</mark>`; | |
} | |
}; |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import { marked } from 'marked'; | |
// Shared renderer instance used by the other extensions in this gist
// (passed to parseInline) so headings render consistently everywhere
export const renderer = new marked.Renderer();

// Custom heading renderer: gives every heading a slug-based id and an
// anchor link pointing at that id, making each section directly linkable
renderer.heading = function(text, level, raw, slugger) {
	// Slug is generated from the raw (unrendered) heading text
	const id = slugger.slug(raw);
	// NOTE(review): the inner <span> uses `display: none`, which screen
	// readers typically also skip — confirm this is the intended way to
	// label the anchor (visually-hidden CSS is the usual pattern)
	return `
		<h${level} id="${id}">
			${text}
			<a class="heading-anchor" href="#${id}">
				<svg-icon icon="link" aria-hidden="true"></svg-icon>
				<span style="display: none">Section titled ${text}</span>
			</a>
		</h${level}>
	`;
};
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment