提交 | 用户 | 时间
|
58d006
|
1 |
// Released under MIT license |
A |
2 |
// Copyright (c) 2009-2010 Dominic Baggott |
|
3 |
// Copyright (c) 2009-2010 Ash Berlin |
|
4 |
// Copyright (c) 2011 Christoph Dorn <christoph@christophdorn.com> (http://www.christophdorn.com) |
|
5 |
|
|
6 |
/*jshint browser:true, devel:true */ |
|
7 |
|
|
8 |
(function( expose ) { |
|
9 |
|
|
10 |
/** |
|
11 |
* class Markdown |
|
12 |
* |
|
13 |
* Markdown processing in Javascript done right. We have very particular views |
|
14 |
* on what constitutes 'right' which include: |
|
15 |
* |
|
16 |
* - produces well-formed HTML (this means that em and strong nesting is |
|
17 |
* important) |
|
18 |
* |
|
19 |
* - has an intermediate representation to allow processing of parsed data (We |
|
20 |
* in fact have two, both as [JsonML]: a markdown tree and an HTML tree). |
|
21 |
* |
|
22 |
* - is easily extensible to add new dialects without having to rewrite the |
|
23 |
* entire parsing mechanics |
|
24 |
* |
|
25 |
* - has a good test suite |
|
26 |
* |
|
27 |
* This implementation fulfills all of these (except that the test suite could |
|
28 |
* do with expanding to automatically run all the fixtures from other Markdown |
|
29 |
* implementations.) |
|
30 |
* |
|
31 |
* ##### Intermediate Representation |
|
32 |
* |
|
33 |
* *TODO* Talk about this :) Its JsonML, but document the node names we use. |
|
34 |
* |
|
35 |
* [JsonML]: http://jsonml.org/ "JSON Markup Language" |
|
36 |
**/ |
|
37 |
/**
 * Construct a Markdown parser bound to a dialect.
 *
 * `dialect` may be omitted (defaults to Gruber), a dialect object, or the
 * name of a dialect registered in Markdown.dialects. An unknown name throws.
 */
var Markdown = expose.Markdown = function(dialect) {
  var kind = typeof dialect;

  if ( kind === "undefined" ) {
    this.dialect = Markdown.dialects.Gruber;
  }
  else if ( kind === "object" ) {
    this.dialect = dialect;
  }
  else if ( dialect in Markdown.dialects ) {
    this.dialect = Markdown.dialects[dialect];
  }
  else {
    throw new Error("Unknown Markdown dialect '" + String(dialect) + "'");
  }

  // Per-parse state used by the inline em/strong handlers and debug().
  this.em_state = [];
  this.strong_state = [];
  this.debug_indent = "";
};
|
58 |
|
|
59 |
/**
 * parse( markdown, [dialect] ) -> JsonML
 * - markdown (String): markdown string to parse
 * - dialect (String | Dialect): the dialect to use, defaults to gruber
 *
 * Parse `markdown` and return a markdown document as a Markdown.JsonML tree.
 **/
expose.parse = function( source, dialect ) {
  // The Markdown constructor supplies the default dialect when none is given.
  return new Markdown( dialect ).toTree( source );
};
|
71 |
|
|
72 |
/**
 * toHTML( markdown, [dialect] ) -> String
 * toHTML( md_tree ) -> String
 * - markdown (String): markdown string to parse
 * - md_tree (Markdown.JsonML): parsed markdown tree
 *
 * Take markdown (either as a string or as a JsonML tree) and run it through
 * [[toHTMLTree]] then turn it into a well-formated HTML fragment.
 **/
expose.toHTML = function toHTML( source, dialect, options ) {
  // Build the HTML JsonML tree first, then flatten it to a string.
  var tree = expose.toHTMLTree( source, dialect, options );
  return expose.renderJsonML( tree );
};
|
86 |
|
|
87 |
/**
 * toHTMLTree( markdown, [dialect] ) -> JsonML
 * toHTMLTree( md_tree ) -> JsonML
 * - markdown (String): markdown string to parse
 * - dialect (String | Dialect): the dialect to use, defaults to gruber
 * - md_tree (Markdown.JsonML): parsed markdown tree
 *
 * Turn markdown into HTML, represented as a JsonML tree. If a string is given
 * to this function, it is first parsed into a markdown tree by calling
 * [[parse]].
 **/
expose.toHTMLTree = function toHTMLTree( input, dialect, options ) {
  // A string gets parsed into a markdown tree first; a tree is used as-is.
  if ( typeof input === "string" ) {
    input = this.parse( input, dialect );
  }

  // Pull the link-reference table out of the tree's attribute node (if any)
  // so reference-style links/images can be resolved during conversion.
  var attrs = extract_attr( input );
  var refs = ( attrs && attrs.references ) ? attrs.references : {};

  var html = convert_tree_to_html( input, refs, options );
  merge_text_nodes( html );
  return html;
};
|
116 |
|
|
117 |
// For Spidermonkey based engines: serialise a mk_block back to the
// expression that would recreate it. Relies on the non-standard uneval().
function mk_block_toSource() {
  var parts = [
    uneval(this.toString()),
    uneval(this.trailing),
    uneval(this.lineNumber)
  ];
  return "Markdown.mk_block( " + parts.join(", ") + " )";
}
|
127 |
|
|
128 |
// node: pretty-printer hook so mk_block objects show their metadata
// (trailing text and line number) under util.inspect / console.log.
function mk_block_inspect() {
  var util = require("util");
  var parts = [
    util.inspect(this.toString()),
    util.inspect(this.trailing),
    util.inspect(this.lineNumber)
  ];
  return "Markdown.mk_block( " + parts.join(", ") + " )";
}
|
140 |
|
|
141 |
/**
 * Wrap a block of text in a String object carrying block metadata:
 * `trailing` (the blank-line run that followed the block) and, when known,
 * `lineNumber` (1-based position in the original source).
 */
var mk_block = Markdown.mk_block = function(block, trail, line) {
  // Be helpful for default case in tests.
  if ( arguments.length == 1 ) {
    trail = "\n\n";
  }

  var str = new String(block);
  str.trailing = trail;
  // Custom printers make it clear this is not just a plain string.
  str.inspect = mk_block_inspect;
  str.toSource = mk_block_toSource;

  // Loose check on purpose: skips both null and undefined.
  if ( line != undefined ) {
    str.lineNumber = line;
  }

  return str;
};
|
156 |
|
|
157 |
// Count the number of "\n" characters in `str`.
function count_lines( str ) {
  return str.split("\n").length - 1;
}
|
162 |
|
|
163 |
// Internal - split source into rough blocks
// Returns an array of mk_block strings: each block's content plus the blank
// run that terminated it, tagged with the 1-based line number it started on.
// NOTE(review): the `startLine` parameter is accepted but never read here.
Markdown.prototype.split_blocks = function splitBlocks( input, startLine ) {
  // Normalise all line endings to "\n" first.
  input = input.replace(/(\r\n|\n|\r)/g, "\n");
  // [\s\S] matches _anything_ (newline or space)
  // [^] is equivalent but doesn't work in IEs.
  // A block is the shortest run of text up to end-of-input, an abutting
  // "#" header line, or one-or-more blank lines.
  var re = /([\s\S]+?)($|\n#|\n(?:\s*\n|$)+)/g,
      blocks = [],
      m;

  var line_no = 1;

  if ( ( m = /^(\s*\n)/.exec(input) ) != null ) {
    // skip (but count) leading blank lines
    line_no += count_lines( m[0] );
    re.lastIndex = m[0].length;
  }

  while ( ( m = re.exec(input) ) !== null ) {
    if (m[2] == "\n#") {
      // An atx header butting up against this block ends it, but the "#"
      // belongs to the *next* block: back the regex cursor up one char.
      m[2] = "\n";
      re.lastIndex--;
    }
    blocks.push( mk_block( m[1], m[2], line_no ) );
    line_no += count_lines( m[0] );
  }

  return blocks;
};
|
191 |
|
|
192 |
/**
 * Markdown#processBlock( block, next ) -> undefined | [ JsonML, ... ]
 * - block (String): the block to process
 * - next (Array): the following blocks
 *
 * Process `block` and return an array of JsonML nodes representing `block`.
 *
 * Each block-level handler in the dialect is tried, in `__order__`, until one
 * claims the block by returning an array (possibly empty); a false value
 * means "not mine". Handlers call [[Markdown#processInline]] themselves and
 * may adjust `next` in place (shift/splice) to merge or re-split blocks.
 *
 * A dialect can take over this whole dispatch by defining a `__call__`
 * method on its block table.
 */
Markdown.prototype.processBlock = function processBlock( block, next ) {
  var handlers = this.dialect.block;

  if ( "__call__" in handlers ) {
    return handlers.__call__.call(this, block, next);
  }

  var order = handlers.__order__;
  for ( var i = 0; i < order.length; i++ ) {
    var name = order[i];
    var res = handlers[ name ].call( this, block, next );

    if ( !res ) continue;

    // Handlers must return an array of JsonML nodes; warn via debug if not.
    if ( !isArray(res) || ( res.length > 0 && !( isArray(res[0]) ) ) )
      this.debug(name, "didn't return a proper array");

    return res;
  }

  // No handler claimed the block. Should we throw an error?
  return [];
};
|
236 |
|
|
237 |
// Run the dialect's inline grammar over `block`, returning an array of
// inline JsonML nodes.
Markdown.prototype.processInline = function processInline( block ) {
  var inline = this.dialect.inline;
  return inline.__call__.call( this, String( block ) );
};
|
240 |
|
|
241 |
/**
 * Markdown#toTree( source ) -> JsonML
 * - source (String): markdown source to parse
 *
 * Parse `source` into a JsonML tree representing the markdown document.
 **/
// custom_tree means set this.tree to `custom_tree` and restore old value on return
Markdown.prototype.toTree = function toTree( source, custom_root ) {
  // `source` may already be an array of blocks (recursive handlers such as
  // list processing pass one in); otherwise split the string ourselves.
  var blocks = source instanceof Array ? source : this.split_blocks( source );

  // Make tree a member variable so its easier to mess with in extensions
  var old_tree = this.tree;
  try {
    this.tree = custom_root || this.tree || [ "markdown" ];

    blocks:
    while ( blocks.length ) {
      var b = this.processBlock( blocks.shift(), blocks );

      // Reference blocks and the like won't return any content
      if ( !b.length ) continue blocks;

      this.tree.push.apply( this.tree, b );
    }
    return this.tree;
  }
  finally {
    // Restore the caller's tree when we were parsing into a custom root.
    if ( custom_root ) {
      this.tree = old_tree;
    }
  }
};
|
273 |
|
|
274 |
// Debug logging helper; a noop unless a `print` function (shell engines)
// and/or `console.log` is available. Output is prefixed with the current
// debug indent. Intended to be overridable by embedders.
Markdown.prototype.debug = function () {
  var args = Array.prototype.slice.call( arguments );
  args.unshift(this.debug_indent);
  if ( typeof print !== "undefined" )
    print.apply( print, args );
  if ( typeof console !== "undefined" && typeof console.log !== "undefined" )
    console.log.apply( null, args );
};
|
283 |
|
|
284 |
/**
 * Repeatedly match `re` against the front of `block`, invoking `cb(match)`
 * with `this` bound to the parser for each hit and consuming the matched
 * text. Returns whatever text is left once the regexp stops matching.
 *
 * Dont use /g regexps with this — matching must restart at the front.
 */
Markdown.prototype.loop_re_over_block = function( re, block, cb ) {
  var remaining = block.valueOf();

  for ( ;; ) {
    if ( !remaining.length ) break;
    var m = re.exec( remaining );
    if ( m == null ) break;
    remaining = remaining.substr( m[0].length );
    cb.call( this, m );
  }

  return remaining;
};
|
295 |
|
|
296 |
/**
 * Markdown.dialects
 *
 * Namespace of built-in dialects.
 **/
// Dialects register themselves here under a name (e.g. "Gruber") so the
// Markdown constructor can look them up by string.
Markdown.dialects = {};
|
302 |
|
|
303 |
/** |
|
304 |
* Markdown.dialects.Gruber |
|
305 |
* |
|
306 |
* The default dialect that follows the rules set out by John Gruber's |
|
307 |
* markdown.pl as closely as possible. Well actually we follow the behaviour of |
|
308 |
* that script which in some places is not exactly what the syntax web page |
|
309 |
* says. |
|
310 |
**/ |
|
311 |
Markdown.dialects.Gruber = { |
|
312 |
block: { |
|
313 |
atxHeader: function atxHeader( block, next ) { |
|
314 |
var m = block.match( /^(#{1,6})\s*(.*?)\s*#*\s*(?:\n|$)/ ); |
|
315 |
|
|
316 |
if ( !m ) return undefined; |
|
317 |
|
|
318 |
var header = [ "header", { level: m[ 1 ].length } ]; |
|
319 |
Array.prototype.push.apply(header, this.processInline(m[ 2 ])); |
|
320 |
|
|
321 |
if ( m[0].length < block.length ) |
|
322 |
next.unshift( mk_block( block.substr( m[0].length ), block.trailing, block.lineNumber + 2 ) ); |
|
323 |
|
|
324 |
return [ header ]; |
|
325 |
}, |
|
326 |
|
|
327 |
setextHeader: function setextHeader( block, next ) { |
|
328 |
var m = block.match( /^(.*)\n([-=])\2\2+(?:\n|$)/ ); |
|
329 |
|
|
330 |
if ( !m ) return undefined; |
|
331 |
|
|
332 |
var level = ( m[ 2 ] === "=" ) ? 1 : 2; |
|
333 |
var header = [ "header", { level : level }, m[ 1 ] ]; |
|
334 |
|
|
335 |
if ( m[0].length < block.length ) |
|
336 |
next.unshift( mk_block( block.substr( m[0].length ), block.trailing, block.lineNumber + 2 ) ); |
|
337 |
|
|
338 |
return [ header ]; |
|
339 |
}, |
|
340 |
|
|
341 |
    // Indented code blocks: every line starts with a tab (after at most
    // three spaces) or four spaces.
    code: function code( block, next ) {
      // |    Foo
      // |bar
      // should be a code block followed by a paragraph. Fun
      //
      // There might also be adjacent code block to merge.

      var ret = [],
          re = /^(?: {0,3}\t| {4})(.*)\n?/,
          lines;

      // 4 spaces + content
      if ( !block.match( re ) ) return undefined;

      block_search:
      do {
        // Now pull out the rest of the lines
        var b = this.loop_re_over_block(
                  re, block.valueOf(), function( m ) { ret.push( m[1] ); } );

        if ( b.length ) {
          // Case alluded to in first comment. push it back on as a new block
          next.unshift( mk_block(b, block.trailing) );
          break block_search;
        }
        else if ( next.length ) {
          // Check the next block - it might be code too
          if ( !next[0].match( re ) ) break block_search;

          // Pull how how many blanks lines follow - minus two to account for .join
          ret.push ( block.trailing.replace(/[^\n]/g, "").substring(2) );

          // Merge the following code block into this one and keep looping.
          block = next.shift();
        }
        else {
          break block_search;
        }
      } while ( true );

      return [ [ "code_block", ret.join("\n") ] ];
    },
|
382 |
|
|
383 |
    // Horizontal rules: three or more of the same -, _ or * on a line,
    // optionally separated by spaces/tabs.
    horizRule: function horizRule( block, next ) {
      // this needs to find any hr in the block to handle abutting blocks
      // m[1] = text before the rule, m[2] = rule char, m[3] = text after.
      var m = block.match( /^(?:([\s\S]*?)\n)?[ \t]*([-_*])(?:[ \t]*\2){2,}[ \t]*(?:\n([\s\S]*))?$/ );

      if ( !m ) {
        return undefined;
      }

      var jsonml = [ [ "hr" ] ];

      // if there's a leading abutting block, process it
      if ( m[ 1 ] ) {
        jsonml.unshift.apply( jsonml, this.processBlock( m[ 1 ], [] ) );
      }

      // if there's a trailing abutting block, stick it into next
      if ( m[ 3 ] ) {
        next.unshift( mk_block( m[ 3 ] ) );
      }

      return jsonml;
    },
|
405 |
|
|
406 |
    // There are two types of lists. Tight and loose. Tight lists have no whitespace
    // between the items (and result in text just in the <li>) and loose lists,
    // which have an empty line between list items, resulting in (one or more)
    // paragraphs inside the <li>.
    //
    // There are all sorts weird edge cases about the original markdown.pl's
    // handling of lists:
    //
    // * Nested lists are supposed to be indented by four chars per level. But
    //   if they aren't, you can get a nested list by indenting by less than
    //   four so long as the indent doesn't match an indent of an existing list
    //   item in the 'nest stack'.
    //
    // * The type of the list (bullet or number) is controlled just by the
    //   first item at the indent. Subsequent changes are ignored unless they
    //   are for nested lists
    //
    // Implemented as an IIFE so the helper functions and regexps below stay
    // private; the value of `lists` is the returned matcher function.
    lists: (function( ) {
      // Use a closure to hide a few variables.
      var any_list = "[*+-]|\\d+\\.",
          bullet_list = /[*+-]/,
          number_list = /\d+\./,
          // Capture leading indent as it matters for determining nested lists.
          is_list_re = new RegExp( "^( {0,3})(" + any_list + ")[ \t]+" ),
          indent_re = "(?: {0,3}\\t| {4})";

      // TODO: Cache this regexp for certain depths.
      // Create a regexp suitable for matching an li for a given stack depth
      function regex_for_depth( depth ) {

        return new RegExp(
          // m[1] = indent, m[2] = list_type
          "(?:^(" + indent_re + "{0," + depth + "} {0,3})(" + any_list + ")\\s+)|" +
          // m[3] = cont
          "(^" + indent_re + "{0," + (depth-1) + "}[ ]{0,4})"
        );
      }

      // Replace a leading tab (after at most three spaces) with four spaces.
      function expand_tab( input ) {
        return input.replace( / {0,3}\t/g, "    " );
      }

      // Add inline content `inline` to `li`. inline comes from processInline
      // so is an array of content
      function add(li, loose, inline, nl) {
        if ( loose ) {
          li.push( [ "para" ].concat(inline) );
          return;
        }
        // Hmmm, should this be any block level element or just paras?
        var add_to = li[li.length -1] instanceof Array && li[li.length - 1][0] == "para"
                   ? li[li.length -1]
                   : li;

        // If there is already some content in this list, add the new line in
        if ( nl && li.length > 1 ) inline.unshift(nl);

        for ( var i = 0; i < inline.length; i++ ) {
          var what = inline[i],
              is_str = typeof what == "string";
          // Merge adjacent plain-text nodes rather than pushing a new one.
          if ( is_str && add_to.length > 1 && typeof add_to[add_to.length-1] == "string" ) {
            add_to[ add_to.length-1 ] += what;
          }
          else {
            add_to.push( what );
          }
        }
      }

      // contained means have an indent greater than the current one. On
      // *every* line in the block
      function get_contained_blocks( depth, blocks ) {

        var re = new RegExp( "^(" + indent_re + "{" + depth + "}.*?\\n?)*$" ),
            replace = new RegExp("^" + indent_re + "{" + depth + "}", "gm"),
            ret = [];

        while ( blocks.length > 0 ) {
          if ( re.exec( blocks[0] ) ) {
            var b = blocks.shift(),
                // Now remove that indent
                x = b.replace( replace, "");

            ret.push( mk_block( x, b.trailing, b.lineNumber ) );
          }
          else {
            break;
          }
        }
        return ret;
      }

      // passed to stack.forEach to turn list items up the stack into paras
      function paragraphify(s, i, stack) {
        var list = s.list;
        var last_li = list[list.length-1];

        // Already a para: nothing to do.
        if ( last_li[1] instanceof Array && last_li[1][0] == "para" ) {
          return;
        }
        if ( i + 1 == stack.length ) {
          // Last stack frame
          // Keep the same array, but replace the contents
          last_li.push( ["para"].concat( last_li.splice(1, last_li.length - 1) ) );
        }
        else {
          // Inner frame: its last child is the sublist; wrap everything
          // before it in a para and re-append the sublist after.
          var sublist = last_li.pop();
          last_li.push( ["para"].concat( last_li.splice(1, last_li.length - 1) ), sublist );
        }
      }

      // The matcher function
      return function( block, next ) {
        var m = block.match( is_list_re );
        if ( !m ) return undefined;

        // Start a new list of the right type and push it onto the nest stack.
        function make_list( m ) {
          var list = bullet_list.exec( m[2] )
                   ? ["bulletlist"]
                   : ["numberlist"];

          stack.push( { list: list, indent: m[1] } );
          return list;
        }


        var stack = [], // Stack of lists for nesting.
            list = make_list( m ),
            last_li,
            loose = false,
            ret = [ stack[0].list ],
            i;

        // Loop to search over block looking for inner block elements and loose lists
        loose_search:
        while ( true ) {
          // Split into lines preserving new lines at end of line
          var lines = block.split( /(?=\n)/ );

          // We have to grab all lines for a li and call processInline on them
          // once as there are some inline things that can span lines.
          var li_accumulate = "";

          // Loop over the lines in this block looking for tight lists.
          tight_search:
          for ( var line_no = 0; line_no < lines.length; line_no++ ) {
            var nl = "",
                l = lines[line_no].replace(/^\n/, function(n) { nl = n; return ""; });

            // TODO: really should cache this
            var line_re = regex_for_depth( stack.length );

            // NOTE: the "cont" alternative in line_re can match the empty
            // string, so m is never null here.
            m = l.match( line_re );
            //print( "line:", uneval(l), "\nline match:", uneval(m) );

            // We have a list item
            if ( m[1] !== undefined ) {
              // Process the previous list item, if any
              if ( li_accumulate.length ) {
                add( last_li, loose, this.processInline( li_accumulate ), nl );
                // Loose mode will have been dealt with. Reset it
                loose = false;
                li_accumulate = "";
              }

              m[1] = expand_tab( m[1] );
              var wanted_depth = Math.floor(m[1].length/4)+1;
              //print( "want:", wanted_depth, "stack:", stack.length);
              if ( wanted_depth > stack.length ) {
                // Deep enough for a nested list outright
                //print ( "new nested list" );
                list = make_list( m );
                last_li.push( list );
                last_li = list[1] = [ "listitem" ];
              }
              else {
                // We aren't deep enough to be strictly a new level. This is
                // where Md.pl goes nuts. If the indent matches a level in the
                // stack, put it there, else put it one deeper then the
                // wanted_depth deserves.
                var found = false;
                for ( i = 0; i < stack.length; i++ ) {
                  if ( stack[ i ].indent != m[1] ) continue;
                  list = stack[ i ].list;
                  stack.splice( i+1, stack.length - (i+1) );
                  found = true;
                  break;
                }

                if (!found) {
                  //print("not found. l:", uneval(l));
                  wanted_depth++;
                  if ( wanted_depth <= stack.length ) {
                    stack.splice(wanted_depth, stack.length - wanted_depth);
                    //print("Desired depth now", wanted_depth, "stack:", stack.length);
                    list = stack[wanted_depth-1].list;
                    //print("list:", uneval(list) );
                  }
                  else {
                    //print ("made new stack for messy indent");
                    list = make_list(m);
                    last_li.push(list);
                  }
                }

                //print( uneval(list), "last", list === stack[stack.length-1].list );
                last_li = [ "listitem" ];
                list.push(last_li);
              } // end depth of shenegains
              nl = "";
            }

            // Add content
            if ( l.length > m[0].length ) {
              li_accumulate += nl + l.substr( m[0].length );
            }
          } // tight_search

          if ( li_accumulate.length ) {
            add( last_li, loose, this.processInline( li_accumulate ), nl );
            // Loose mode will have been dealt with. Reset it
            loose = false;
            li_accumulate = "";
          }

          // Look at the next block - we might have a loose list. Or an extra
          // paragraph for the current li
          var contained = get_contained_blocks( stack.length, next );

          // Deal with code blocks or properly nested lists
          if ( contained.length > 0 ) {
            // Make sure all listitems up the stack are paragraphs
            forEach( stack, paragraphify, this);

            last_li.push.apply( last_li, this.toTree( contained, [] ) );
          }

          var next_block = next[0] && next[0].valueOf() || "";

          if ( next_block.match(is_list_re) || next_block.match( /^ / ) ) {
            block = next.shift();

            // Check for an HR following a list: features/lists/hr_abutting
            var hr = this.dialect.block.horizRule( block, next );

            if ( hr ) {
              ret.push.apply(ret, hr);
              break;
            }

            // Make sure all listitems up the stack are paragraphs
            forEach( stack, paragraphify, this);

            loose = true;
            continue loose_search;
          }
          break;
        } // loose_search

        return ret;
      };
    })(),
|
667 |
|
|
668 |
    // Blockquotes: any block containing a line starting with ">".
    blockquote: function blockquote( block, next ) {
      if ( !block.match( /^>/m ) )
        return undefined;

      var jsonml = [];

      // separate out the leading abutting block, if any. I.e. in this case:
      //
      //  a
      //  > b
      //
      if ( block[ 0 ] != ">" ) {
        var lines = block.split( /\n/ ),
            prev = [],
            line_no = block.lineNumber;

        // keep shifting lines until you find a crotchet
        while ( lines.length && lines[ 0 ][ 0 ] != ">" ) {
          prev.push( lines.shift() );
          line_no++;
        }

        var abutting = mk_block( prev.join( "\n" ), "\n", block.lineNumber );
        jsonml.push.apply( jsonml, this.processBlock( abutting, [] ) );
        // reassemble new block of just block quotes!
        block = mk_block( lines.join( "\n" ), block.trailing, line_no );
      }


      // if the next block is also a blockquote merge it in
      while ( next.length && next[ 0 ][ 0 ] == ">" ) {
        var b = next.shift();
        block = mk_block( block + block.trailing + b, b.trailing, block.lineNumber );
      }

      // Strip off the leading "> " and re-process as a block.
      var input = block.replace( /^> ?/gm, "" ),
          old_tree = this.tree,
          processedBlock = this.toTree( input, [ "blockquote" ] ),
          attr = extract_attr( processedBlock );

      // If any link references were found get rid of them
      // (they have already been merged into this.tree by referenceDefn).
      if ( attr && attr.references ) {
        delete attr.references;
        // And then remove the attribute object if it's empty
        if ( isEmpty( attr ) ) {
          processedBlock.splice( 1, 1 );
        }
      }

      jsonml.push( processedBlock );
      return jsonml;
    },
|
721 |
|
|
722 |
    // Link-reference definitions: lines like `[id]: url "optional title"`.
    // They produce no output nodes; instead the definitions are recorded on
    // the tree's attribute node under `references`.
    referenceDefn: function referenceDefn( block, next) {
      var re = /^\s*\[(.*?)\]:\s*(\S+)(?:\s+(?:(['"])(.*?)\3|\((.*?)\)))?\n?/;
      // interesting matches are [ , ref_id, url, , title, title ]

      if ( !block.match(re) )
        return undefined;

      // make an attribute node if it doesn't exist
      if ( !extract_attr( this.tree ) ) {
        this.tree.splice( 1, 0, {} );
      }

      var attrs = extract_attr( this.tree );

      // make a references hash if it doesn't exist
      if ( attrs.references === undefined ) {
        attrs.references = {};
      }

      // Consume every definition at the front of the block.
      var b = this.loop_re_over_block(re, block, function( m ) {

        // Strip the optional <...> around the URL.
        if ( m[2] && m[2][0] == "<" && m[2][m[2].length-1] == ">" )
          m[2] = m[2].substring( 1, m[2].length - 1 );

        // Reference ids are matched case-insensitively.
        var ref = attrs.references[ m[1].toLowerCase() ] = {
          href: m[2]
        };

        // m[4] is a quoted title, m[5] a parenthesised one.
        if ( m[4] !== undefined )
          ref.title = m[4];
        else if ( m[5] !== undefined )
          ref.title = m[5];

      } );

      // Anything left over was not a definition; requeue it as a new block.
      if ( b.length )
        next.unshift( mk_block( b, block.trailing ) );

      return [];
    },
|
762 |
|
|
763 |
para: function para( block, next ) { |
|
764 |
// everything's a para! |
|
765 |
return [ ["para"].concat( this.processInline( block ) ) ]; |
|
766 |
} |
|
767 |
} |
|
768 |
}; |
|
769 |
|
|
770 |
Markdown.dialects.Gruber.inline = { |
|
771 |
|
|
772 |
__oneElement__: function oneElement( text, patterns_or_re, previous_nodes ) { |
|
773 |
var m, |
|
774 |
res, |
|
775 |
lastIndex = 0; |
|
776 |
|
|
777 |
patterns_or_re = patterns_or_re || this.dialect.inline.__patterns__; |
|
778 |
var re = new RegExp( "([\\s\\S]*?)(" + (patterns_or_re.source || patterns_or_re) + ")" ); |
|
779 |
|
|
780 |
m = re.exec( text ); |
|
781 |
if (!m) { |
|
782 |
// Just boring text |
|
783 |
return [ text.length, text ]; |
|
784 |
} |
|
785 |
else if ( m[1] ) { |
|
786 |
// Some un-interesting text matched. Return that first |
|
787 |
return [ m[1].length, m[1] ]; |
|
788 |
} |
|
789 |
|
|
790 |
var res; |
|
791 |
if ( m[2] in this.dialect.inline ) { |
|
792 |
res = this.dialect.inline[ m[2] ].call( |
|
793 |
this, |
|
794 |
text.substr( m.index ), m, previous_nodes || [] ); |
|
795 |
} |
|
796 |
// Default for now to make dev easier. just slurp special and output it. |
|
797 |
res = res || [ m[2].length, m[2] ]; |
|
798 |
return res; |
|
799 |
}, |
|
800 |
|
|
801 |
__call__: function inline( text, patterns ) { |
|
802 |
|
|
803 |
var out = [], |
|
804 |
res; |
|
805 |
|
|
806 |
function add(x) { |
|
807 |
//D:self.debug(" adding output", uneval(x)); |
|
808 |
if ( typeof x == "string" && typeof out[out.length-1] == "string" ) |
|
809 |
out[ out.length-1 ] += x; |
|
810 |
else |
|
811 |
out.push(x); |
|
812 |
} |
|
813 |
|
|
814 |
while ( text.length > 0 ) { |
|
815 |
res = this.dialect.inline.__oneElement__.call(this, text, patterns, out ); |
|
816 |
text = text.substr( res.shift() ); |
|
817 |
forEach(res, add ) |
|
818 |
} |
|
819 |
|
|
820 |
return out; |
|
821 |
}, |
|
822 |
|
|
823 |
  // These characters are interesting elsewhere, so have rules for them so that
  // chunks of plain text blocks don't include them.
  // The handlers deliberately return undefined: __oneElement__ then falls
  // back to emitting the single character as plain text.
  "]": function () {},
  "}": function () {},

  // The set of punctuation characters a backslash may escape
  // (used by the "\\" handler).
  __escape__ : /^\\[\\`\*_{}\[\]()#\+.!\-]/,
|
830 |
"\\": function escaped( text ) { |
|
831 |
// [ length of input processed, node/children to add... ] |
|
832 |
// Only esacape: \ ` * _ { } [ ] ( ) # * + - . ! |
|
833 |
if ( this.dialect.inline.__escape__.exec( text ) ) |
|
834 |
return [ 2, text.charAt( 1 ) ]; |
|
835 |
else |
|
836 |
// Not an esacpe |
|
837 |
return [ 1, "\\" ]; |
|
838 |
}, |
|
839 |
|
|
840 |
"![": function image( text ) { |
|
841 |
|
|
842 |
// Unlike images, alt text is plain text only. no other elements are |
|
843 |
// allowed in there |
|
844 |
|
|
845 |
// ![Alt text](/path/to/img.jpg "Optional title") |
|
846 |
// 1 2 3 4 <--- captures |
|
847 |
var m = text.match( /^!\[(.*?)\][ \t]*\([ \t]*([^")]*?)(?:[ \t]+(["'])(.*?)\3)?[ \t]*\)/ ); |
|
848 |
|
|
849 |
if ( m ) { |
|
850 |
if ( m[2] && m[2][0] == "<" && m[2][m[2].length-1] == ">" ) |
|
851 |
m[2] = m[2].substring( 1, m[2].length - 1 ); |
|
852 |
|
|
853 |
m[2] = this.dialect.inline.__call__.call( this, m[2], /\\/ )[0]; |
|
854 |
|
|
855 |
var attrs = { alt: m[1], href: m[2] || "" }; |
|
856 |
if ( m[4] !== undefined) |
|
857 |
attrs.title = m[4]; |
|
858 |
|
|
859 |
return [ m[0].length, [ "img", attrs ] ]; |
|
860 |
} |
|
861 |
|
|
862 |
// ![Alt text][id] |
|
863 |
m = text.match( /^!\[(.*?)\][ \t]*\[(.*?)\]/ ); |
|
864 |
|
|
865 |
if ( m ) { |
|
866 |
// We can't check if the reference is known here as it likely wont be |
|
867 |
// found till after. Check it in md tree->hmtl tree conversion |
|
868 |
return [ m[0].length, [ "img_ref", { alt: m[1], ref: m[2].toLowerCase(), original: m[0] } ] ]; |
|
869 |
} |
|
870 |
|
|
871 |
// Just consume the '![' |
|
872 |
return [ 2, "![" ]; |
|
873 |
}, |
|
874 |
|
|
875 |
"[": function link( text ) { |
|
876 |
|
|
877 |
var orig = String(text); |
|
878 |
// Inline content is possible inside `link text` |
|
879 |
var res = Markdown.DialectHelpers.inline_until_char.call( this, text.substr(1), "]" ); |
|
880 |
|
|
881 |
// No closing ']' found. Just consume the [ |
|
882 |
if ( !res ) return [ 1, "[" ]; |
|
883 |
|
|
884 |
var consumed = 1 + res[ 0 ], |
|
885 |
children = res[ 1 ], |
|
886 |
link, |
|
887 |
attrs; |
|
888 |
|
|
889 |
// At this point the first [...] has been parsed. See what follows to find |
|
890 |
// out which kind of link we are (reference or direct url) |
|
891 |
text = text.substr( consumed ); |
|
892 |
|
|
893 |
// [link text](/path/to/img.jpg "Optional title") |
|
894 |
// 1 2 3 <--- captures |
|
895 |
// This will capture up to the last paren in the block. We then pull |
|
896 |
// back based on if there a matching ones in the url |
|
897 |
// ([here](/url/(test)) |
|
898 |
// The parens have to be balanced |
|
899 |
var m = text.match( /^\s*\([ \t]*([^"']*)(?:[ \t]+(["'])(.*?)\2)?[ \t]*\)/ ); |
|
900 |
if ( m ) { |
|
901 |
var url = m[1]; |
|
902 |
consumed += m[0].length; |
|
903 |
|
|
904 |
if ( url && url[0] == "<" && url[url.length-1] == ">" ) |
|
905 |
url = url.substring( 1, url.length - 1 ); |
|
906 |
|
|
907 |
// If there is a title we don't have to worry about parens in the url |
|
908 |
if ( !m[3] ) { |
|
909 |
var open_parens = 1; // One open that isn't in the capture |
|
910 |
for ( var len = 0; len < url.length; len++ ) { |
|
911 |
switch ( url[len] ) { |
|
912 |
case "(": |
|
913 |
open_parens++; |
|
914 |
break; |
|
915 |
case ")": |
|
916 |
if ( --open_parens == 0) { |
|
917 |
consumed -= url.length - len; |
|
918 |
url = url.substring(0, len); |
|
919 |
} |
|
920 |
break; |
|
921 |
} |
|
922 |
} |
|
923 |
} |
|
924 |
|
|
925 |
// Process escapes only |
|
926 |
url = this.dialect.inline.__call__.call( this, url, /\\/ )[0]; |
|
927 |
|
|
928 |
attrs = { href: url || "" }; |
|
929 |
if ( m[3] !== undefined) |
|
930 |
attrs.title = m[3]; |
|
931 |
|
|
932 |
link = [ "link", attrs ].concat( children ); |
|
933 |
return [ consumed, link ]; |
|
934 |
} |
|
935 |
|
|
936 |
// [Alt text][id] |
|
937 |
// [Alt text] [id] |
|
938 |
m = text.match( /^\s*\[(.*?)\]/ ); |
|
939 |
|
|
940 |
if ( m ) { |
|
941 |
|
|
942 |
consumed += m[ 0 ].length; |
|
943 |
|
|
944 |
// [links][] uses links as its reference |
|
945 |
attrs = { ref: ( m[ 1 ] || String(children) ).toLowerCase(), original: orig.substr( 0, consumed ) }; |
|
946 |
|
|
947 |
link = [ "link_ref", attrs ].concat( children ); |
|
948 |
|
|
949 |
// We can't check if the reference is known here as it likely wont be |
|
950 |
// found till after. Check it in md tree->hmtl tree conversion. |
|
951 |
// Store the original so that conversion can revert if the ref isn't found. |
|
952 |
return [ consumed, link ]; |
|
953 |
} |
|
954 |
|
|
955 |
// [id] |
|
956 |
// Only if id is plain (no formatting.) |
|
957 |
if ( children.length == 1 && typeof children[0] == "string" ) { |
|
958 |
|
|
959 |
attrs = { ref: children[0].toLowerCase(), original: orig.substr( 0, consumed ) }; |
|
960 |
link = [ "link_ref", attrs, children[0] ]; |
|
961 |
return [ consumed, link ]; |
|
962 |
} |
|
963 |
|
|
964 |
// Just consume the "[" |
|
965 |
return [ 1, "[" ]; |
|
966 |
}, |
|
967 |
|
|
968 |
|
|
969 |
"<": function autoLink( text ) { |
|
970 |
var m; |
|
971 |
|
|
972 |
if ( ( m = text.match( /^<(?:((https?|ftp|mailto):[^>]+)|(.*?@.*?\.[a-zA-Z]+))>/ ) ) != null ) { |
|
973 |
if ( m[3] ) { |
|
974 |
return [ m[0].length, [ "link", { href: "mailto:" + m[3] }, m[3] ] ]; |
|
975 |
|
|
976 |
} |
|
977 |
else if ( m[2] == "mailto" ) { |
|
978 |
return [ m[0].length, [ "link", { href: m[1] }, m[1].substr("mailto:".length ) ] ]; |
|
979 |
} |
|
980 |
else |
|
981 |
return [ m[0].length, [ "link", { href: m[1] }, m[1] ] ]; |
|
982 |
} |
|
983 |
|
|
984 |
return [ 1, "<" ]; |
|
985 |
}, |
|
986 |
|
|
987 |
"`": function inlineCode( text ) { |
|
988 |
// Inline code block. as many backticks as you like to start it |
|
989 |
// Always skip over the opening ticks. |
|
990 |
var m = text.match( /(`+)(([\s\S]*?)\1)/ ); |
|
991 |
|
|
992 |
if ( m && m[2] ) |
|
993 |
return [ m[1].length + m[2].length, [ "inlinecode", m[3] ] ]; |
|
994 |
else { |
|
995 |
// TODO: No matching end code found - warn! |
|
996 |
return [ 1, "`" ]; |
|
997 |
} |
|
998 |
}, |
|
999 |
|
|
1000 |
" \n": function lineBreak( text ) { |
|
1001 |
return [ 3, [ "linebreak" ] ]; |
|
1002 |
} |
|
1003 |
|
|
1004 |
}; |
|
1005 |
|
|
1006 |
// Meta Helper/generator method for em and strong handling |
|
1007 |
// Meta helper/generator for the em and strong inline handlers.
// `tag` is the output node name ("em" or "strong") and `md` is the
// markdown delimiter ("*", "_", "**" or "__").
function strong_em( tag, md ) {

  var state_slot = tag + "_state";
  var other_slot = ( tag == "strong" ) ? "em_state" : "strong_state";

  // Sentinel passed back up through the inline recursion to signal
  // that the matching closing delimiter was found.
  function CloseTag( len ) {
    this.len_after = len;
    this.name = "close_" + md;
  }

  return function( text, orig_match ) {

    if ( this[state_slot][0] == md ) {
      // The most recently opened emphasis is of this exact type, so
      // this occurrence closes it.
      this[state_slot].shift();

      // "Consume" the rest of the input so that control returns to
      // the recursion in the opening branch below.
      return [ text.length, new CloseTag( text.length - md.length ) ];
    }

    // Opening delimiter. Snapshot both state stacks so we can roll
    // back if no matching close turns up.
    var saved_other = this[other_slot].slice();
    var saved_state = this[state_slot].slice();

    this[state_slot].unshift( md );

    // Recurse over the remainder of the text.
    var nodes = this.processInline( text.substr( md.length ) );

    var last = nodes[nodes.length - 1];

    // Pop the delimiter we pushed above (the side effect is required
    // even though the popped value is unused).
    this[state_slot].shift();

    if ( last instanceof CloseTag ) {
      nodes.pop();
      // Matched — wrap everything up to the close in the target tag.
      var consumed = text.length - last.len_after;
      return [ consumed, [ tag ].concat( nodes ) ];
    }

    // No close found. Restore both stacks — we may have mistakenly
    // closed the other kind — and emit the delimiter as plain text.
    // The processed nodes cannot be reused: they may carry the wrong
    // parsing context.
    this[other_slot] = saved_other;
    this[state_slot] = saved_state;
    return [ md.length, md ];
  }; // end returned handler
}
|
1062 |
|
|
1063 |
Markdown.dialects.Gruber.inline["**"] = strong_em("strong", "**"); |
|
1064 |
Markdown.dialects.Gruber.inline["__"] = strong_em("strong", "__"); |
|
1065 |
Markdown.dialects.Gruber.inline["*"] = strong_em("em", "*"); |
|
1066 |
Markdown.dialects.Gruber.inline["_"] = strong_em("em", "_"); |
|
1067 |
|
|
1068 |
|
|
1069 |
// Build default order from insertion order. |
|
1070 |
// Build the default block-handler order from the object's insertion
// order and store it on d.__order__. The reserved names "__order__"
// and "__call__" are excluded.
Markdown.buildBlockOrder = function(d) {
  var order = [];
  for ( var name in d ) {
    if ( name == "__order__" || name == "__call__" ) {
      continue;
    }
    order.push( name );
  }
  d.__order__ = order;
};
|
1078 |
|
|
1079 |
// Build patterns for inline matcher |
|
1080 |
// Compile the inline handlers' trigger strings into a single regex
// alternation, store it on d.__patterns__, and wrap d.__call__ so it
// uses that alternation when no explicit pattern is supplied.
Markdown.buildInlinePatterns = function(d) {
  var parts = [];

  for ( var key in d ) {
    // Names of the form __foo__ are reserved, not patterns.
    if ( key.match( /^__.*__$/) ) {
      continue;
    }
    var escaped = key.replace( /([\\.*+?|()\[\]{}])/g, "\\$1" )
                     .replace( /\n/, "\\n" );
    // Multi-character triggers need grouping inside the alternation.
    parts.push( key.length == 1 ? escaped : "(?:" + escaped + ")" );
  }

  var patterns = parts.join("|");
  d.__patterns__ = patterns;

  var fn = d.__call__;
  // Default the pattern argument to the compiled alternation.
  // (Loose != is deliberate: both undefined and null fall through.)
  d.__call__ = function(text, pattern) {
    return ( pattern != undefined )
      ? fn.call(this, text, pattern)
      : fn.call(this, text, patterns);
  };
};
|
1106 |
|
|
1107 |
Markdown.DialectHelpers = {};

// Process inline content until an occurrence of the single character
// `want` is reached. Returns [consumed, nodes] with the terminator
// included in the consumed count, or null when the end of `text` is
// reached without finding it.
Markdown.DialectHelpers.inline_until_char = function( text, want ) {
  var consumed = 0;
  var nodes = [];

  for ( ;; ) {
    if ( text.charAt( consumed ) == want ) {
      // Found the terminator; count it as consumed.
      consumed++;
      return [ consumed, nodes ];
    }

    if ( consumed >= text.length ) {
      // Ran out of input with no terminator — abort.
      return null;
    }

    var res = this.dialect.inline.__oneElement__.call( this, text.substr( consumed ) );
    consumed += res[ 0 ];
    // Append any nodes the element handler produced.
    nodes.push.apply( nodes, res.slice( 1 ) );
  }
};
|
1130 |
|
|
1131 |
// Helper function to make sub-classing a dialect easier |
|
1132 |
// Helper to make sub-classing a dialect easier: the returned dialect's
// block and inline tables inherit prototypally from the parent's, so
// individual handlers can be overridden without copying the rest.
Markdown.subclassDialect = function( d ) {
  function Block() {}
  function Inline() {}
  Block.prototype = d.block;
  Inline.prototype = d.inline;

  return { block: new Block(), inline: new Inline() };
};
|
1140 |
|
|
1141 |
// Finalise the Gruber dialect, then derive Maruku from it.
Markdown.buildBlockOrder ( Markdown.dialects.Gruber.block );
Markdown.buildInlinePatterns( Markdown.dialects.Gruber.inline );

Markdown.dialects.Maruku = Markdown.subclassDialect( Markdown.dialects.Gruber );
|
1145 |
|
|
1146 |
// Turn a Maruku meta hash body ("#id .class key=value") into an
// attribute object.
Markdown.dialects.Maruku.processMetaHash = function processMetaHash( meta_string ) {
  var tokens = split_meta_hash( meta_string );
  var attr = {};

  for ( var t = 0; t < tokens.length; ++t ) {
    var token = tokens[ t ];

    if ( /^#/.test( token ) ) {
      // #foo -> id
      attr.id = token.substring( 1 );
    }
    else if ( /^\./.test( token ) ) {
      // .foo -> class; multiple classes accumulate space-separated.
      if ( attr["class"] ) {
        // Replacing the leading dot with a space yields " foo".
        attr["class"] = attr["class"] + token.replace( /./, " " );
      }
      else {
        attr["class"] = token.substring( 1 );
      }
    }
    else if ( /\=/.test( token ) ) {
      // key=value pair (anything after a second "=" is discarded).
      var pair = token.split( /\=/ );
      attr[ pair[ 0 ] ] = pair[ 1 ];
    }
  }

  return attr;
};
|
1174 |
|
|
1175 |
// Split a meta hash string on unescaped spaces. Quoted sections
// (single or double quotes) keep their spaces and the quote characters
// themselves are dropped; a backslash escapes the following character.
function split_meta_hash( meta_string ) {
  var chars = meta_string.split( "" ),
      parts = [ "" ],
      in_quotes = false;

  while ( chars.length ) {
    var letter = chars.shift();
    switch ( letter ) {
    case " " :
      // Inside quotes the space is literal; otherwise start a new part.
      if ( in_quotes ) {
        parts[ parts.length - 1 ] += letter;
      }
      else {
        parts.push( "" );
      }
      break;
    case "'" :
    case '"' :
      // Toggle quoting and move straight on.
      in_quotes = !in_quotes;
      break;
    case "\\" :
      // Escaped character: take the next one literally, whatever it is.
      // Fix: a trailing backslash used to append the string "undefined"
      // to the last part; now it is simply dropped.
      letter = chars.shift();
      if ( letter === undefined ) {
        break;
      }
      /* falls through */
    default :
      parts[ parts.length - 1 ] += letter;
      break;
    }
  }

  return parts;
}
|
1210 |
|
|
1211 |
// Maruku document meta: `Key: Value` lines in the very first block of
// the document become attributes on the root "markdown" node.
Markdown.dialects.Maruku.block.document_meta = function document_meta( block, next ) {
  // We're only interested in the first block of the document.
  if ( block.lineNumber > 1 ) return undefined;

  // document_meta blocks consist of one or more lines of `Key: Value\n`.
  if ( ! block.match( /^(?:\w+:.*\n)*\w+:.*$/ ) ) return undefined;

  // Make an attribute node on the tree if it doesn't exist yet.
  if ( !extract_attr( this.tree ) ) {
    this.tree.splice( 1, 0, {} );
  }

  var pairs = block.split( /\n/ );
  // Fix: the loop variable was an implicit global (`for ( p in pairs )`);
  // use a declared index loop instead of for..in over an array.
  for ( var p = 0; p < pairs.length; p++ ) {
    var m = pairs[ p ].match( /(\w+):\s*(.*)$/ ),
        key = m[ 1 ].toLowerCase(),
        value = m[ 2 ];

    this.tree[ 1 ][ key ] = value;
  }

  // document_meta produces no content!
  return [];
};
|
1235 |
|
|
1236 |
// Maruku block meta: a trailing `{: ...}` hash attaches attributes to
// the current block, or — when the hash is the whole block — to the
// previous one.
Markdown.dialects.Maruku.block.block_meta = function block_meta( block, next ) {
  // Check whether the last line of the block is a meta hash.
  var m = block.match( /(^|\n) {0,3}\{:\s*((?:\\\}|[^\}])*)\s*\}$/ );
  if ( !m ) return undefined;

  // Process the meta hash into an attribute object.
  var attr = this.dialect.processMetaHash( m[ 2 ] );

  var hash;
  var a; // loop variable — fix: previously an implicit global

  // If we matched ^ the hash is the whole block: apply the meta to the
  // previous block instead.
  if ( m[ 1 ] === "" ) {
    var node = this.tree[ this.tree.length - 1 ];
    hash = extract_attr( node );

    // If the node is a plain string (rather than JsonML), bail.
    if ( typeof node === "string" ) return undefined;

    // Create the attribute hash if it doesn't exist.
    if ( !hash ) {
      hash = {};
      node.splice( 1, 0, hash );
    }

    // Merge the parsed attributes in.
    for ( a in attr ) {
      hash[ a ] = attr[ a ];
    }

    // Return nothing so the meta hash is removed from the output.
    return [];
  }

  // Otherwise pull the meta hash off the block and process what's left.
  var b = block.replace( /\n.*$/, "" ),
      result = this.processBlock( b, [] );

  // Get or make the attributes hash on the processed result.
  hash = extract_attr( result[ 0 ] );
  if ( !hash ) {
    hash = {};
    result[ 0 ].splice( 1, 0, hash );
  }

  // Attach the attributes to the block.
  for ( a in attr ) {
    hash[ a ] = attr[ a ];
  }

  return result;
};
|
1287 |
|
|
1288 |
// Maruku definition lists: one or more terms followed by one or more
// definitions, all within a single ("tight") block.
Markdown.dialects.Maruku.block.definition_list = function definition_list( block, next ) {
  var tight = /^((?:[^\s:].*\n)+):\s+([\s\S]+)$/;
  var list = [ "dl" ];
  var i, m;

  if ( !( m = block.match( tight ) ) ) {
    // Not a tight definition-list block.
    return undefined;
  }

  // Pull any subsequent tight DL blocks out of `next` so they join the
  // same <dl>.
  var blocks = [ block ];
  while ( next.length && tight.exec( next[ 0 ] ) ) {
    blocks.push( next.shift() );
  }

  for ( var b = 0; b < blocks.length; ++b ) {
    var match = blocks[ b ].match( tight );
    var terms = match[ 1 ].replace( /\n$/, "" ).split( /\n/ );
    var defns = match[ 2 ].split( /\n:\s+/ );

    // Terms are emitted as plain text.
    for ( i = 0; i < terms.length; ++i ) {
      list.push( [ "dt", terms[ i ] ] );
    }

    // Definitions get inline processing, with continuation-line
    // indentation stripped first.
    for ( i = 0; i < defns.length; ++i ) {
      list.push( [ "dd" ].concat( this.processInline( defns[ i ].replace( /(\n)\s+/, "$1" ) ) ) );
    }
  }

  return [ list ];
};
|
1325 |
|
|
1326 |
// Maruku-style pipe tables. The nested helper splits on unescaped
// instances of @ch; if @ch is not a single character the result can be
// unpredictable.
Markdown.dialects.Maruku.block.table = function table (block, next) {

  var _split_on_unescaped = function(s, ch) {
    ch = ch || '\\s';
    // Escape the separator when it is a regex metacharacter.
    if (ch.match(/^[\\|\[\]{}?*.+^$]$/)) { ch = '\\' + ch; }
    var pieces = [ ];
    var splitter = new RegExp('^((?:\\\\.|[^\\\\' + ch + '])*)' + ch + '(.*)');
    var found;
    while ((found = s.match(splitter))) {
      pieces.push(found[1]);
      s = found[2];
    }
    pieces.push(s);
    return pieces;
  };

  // Header / alignment rule / body, each line with a leading pipe...
  var leading_pipe = /^ {0,3}\|(.+)\n {0,3}\|\s*([\-:]+[\-| :]*)\n((?:\s*\|.*(?:\n|$))*)(?=\n|$)/;
  // ...or without one, in which case each line needs at least one
  // unescaped pipe.
  var no_leading_pipe = /^ {0,3}(\S(?:\\.|[^\\|])*\|.*)\n {0,3}([\-:]+\s*\|[\-| :]*)\n((?:(?:\\.|[^\\|])*\|.*(?:\n|$))*)(?=\n|$)/;
  var i, m;

  if ((m = block.match(leading_pipe))) {
    // Strip the leading pipes from body rows (the header and rule
    // captures already leave theirs out).
    m[3] = m[3].replace(/^\s*\|/gm, '');
  } else if (!( m = block.match(no_leading_pipe))) {
    return undefined;
  }

  var table = [ "table", [ "thead", [ "tr" ] ], [ "tbody" ] ];

  // The alignment rule cannot contain escaped pipes, so a plain split
  // is safe once the trailing pipe is removed.
  m[2] = m[2].replace(/\|\s*$/, '').split('|');

  // Derive per-column alignment from the rule's colon placement.
  var html_attrs = [ ];
  forEach (m[2], function (s) {
    if (s.match(/^\s*-+:\s*$/)) html_attrs.push({align: "right"});
    else if (s.match(/^\s*:-+\s*$/)) html_attrs.push({align: "left"});
    else if (s.match(/^\s*:-+:\s*$/)) html_attrs.push({align: "center"});
    else html_attrs.push({});
  });

  // Header cells: split on unescaped pipes only.
  m[1] = _split_on_unescaped(m[1].replace(/\|\s*$/, ''), '|');
  for (i = 0; i < m[1].length; i++) {
    table[1][1].push(['th', html_attrs[i] || {}].concat(
      this.processInline(m[1][i].trim())));
  }

  // Body rows.
  forEach (m[3].replace(/\|\s*$/mg, '').split('\n'), function (row) {
    var html_row = ['tr'];
    row = _split_on_unescaped(row, '|');
    for (i = 0; i < row.length; i++) {
      html_row.push(['td', html_attrs[i] || {}].concat(this.processInline(row[i].trim())));
    }
    table[2].push(html_row);
  }, this);

  return [table];
};
|
1391 |
|
|
1392 |
Markdown.dialects.Maruku.inline[ "{:" ] = function inline_meta( text, matches, out ) { |
|
1393 |
if ( !out.length ) { |
|
1394 |
return [ 2, "{:" ]; |
|
1395 |
} |
|
1396 |
|
|
1397 |
// get the preceeding element |
|
1398 |
var before = out[ out.length - 1 ]; |
|
1399 |
|
|
1400 |
if ( typeof before === "string" ) { |
|
1401 |
return [ 2, "{:" ]; |
|
1402 |
} |
|
1403 |
|
|
1404 |
// match a meta hash |
|
1405 |
var m = text.match( /^\{:\s*((?:\\\}|[^\}])*)\s*\}/ ); |
|
1406 |
|
|
1407 |
// no match, false alarm |
|
1408 |
if ( !m ) { |
|
1409 |
return [ 2, "{:" ]; |
|
1410 |
} |
|
1411 |
|
|
1412 |
// attach the attributes to the preceeding element |
|
1413 |
var meta = this.dialect.processMetaHash( m[ 1 ] ), |
|
1414 |
attr = extract_attr( before ); |
|
1415 |
|
|
1416 |
if ( !attr ) { |
|
1417 |
attr = {}; |
|
1418 |
before.splice( 1, 0, attr ); |
|
1419 |
} |
|
1420 |
|
|
1421 |
for ( var k in meta ) { |
|
1422 |
attr[ k ] = meta[ k ]; |
|
1423 |
} |
|
1424 |
|
|
1425 |
// cut out the string and replace it with nothing |
|
1426 |
return [ m[ 0 ].length, "" ]; |
|
1427 |
}; |
|
1428 |
|
|
1429 |
// Maruku additionally allows escaping of "-", "|" and ":" (needed for
// tables and definition lists).
Markdown.dialects.Maruku.inline.__escape__ = /^\\[\\`\*_{}\[\]()#\+.!\-|:]/;

// Finalise the Maruku dialect.
Markdown.buildBlockOrder ( Markdown.dialects.Maruku.block );
Markdown.buildInlinePatterns( Markdown.dialects.Maruku.inline );
|
1433 |
|
|
1434 |
// Prefer the native Array.isArray, falling back to the Object
// toString check for older engines.
var isArray = Array.isArray || function(obj) {
  return Object.prototype.toString.call(obj) === "[object Array]";
};
|
1437 |
|
|
1438 |
// Iteration helper. Uses the native Array#forEach when present; the
// fallback deliberately never touches Array.prototype (extending
// natives is not friendly to other code on the page).
var forEach;
if ( Array.prototype.forEach ) {
  forEach = function( arr, cb, thisp ) {
    return arr.forEach( cb, thisp );
  };
}
else {
  forEach = function( arr, cb, thisp ) {
    for ( var i = 0; i < arr.length; i++ ) {
      cb.call( thisp || arr, arr[ i ], i, arr );
    }
  };
}
|
1452 |
|
|
1453 |
// True when `obj` has no own enumerable properties.
// Fix: the original called the bare global `hasOwnProperty` binding,
// which only works because the global object inherits from
// Object.prototype; use Object.prototype.hasOwnProperty explicitly.
var isEmpty = function( obj ) {
  for ( var key in obj ) {
    if ( Object.prototype.hasOwnProperty.call( obj, key ) ) {
      return false;
    }
  }

  return true;
};
|
1462 |
|
|
1463 |
// Return the attribute node of a JsonML element — the plain object in
// slot 1, when present — or undefined.
function extract_attr( jsonml ) {
  if ( !isArray( jsonml ) || jsonml.length <= 1 ) {
    return undefined;
  }

  var candidate = jsonml[ 1 ];
  if ( typeof candidate === "object" && !isArray( candidate ) ) {
    return candidate;
  }

  return undefined;
}
|
1471 |
|
|
1472 |
|
|
1473 |
|
|
1474 |
/**
 * renderJsonML( jsonml[, options] ) -> String
 * - jsonml (Array): JsonML array to render to XML
 * - options (Object): options
 *
 * Converts the given JsonML into well-formed XML.
 *
 * The options currently understood are:
 *
 * - root (Boolean): whether or not the root node should be included in the
 *   output, or just its children. The default `false` is to not include the
 *   root itself.
 */
expose.renderJsonML = function( jsonml, options ) {
  options = options || {};
  // Include the root element in the rendered output?
  options.root = options.root || false;

  var content = [];

  if ( options.root ) {
    content.push( render_tree( jsonml ) );
  }
  else {
    // Drop the root tag — and its attribute node, if present — then
    // render each remaining child on its own.
    jsonml.shift();
    if ( jsonml.length && typeof jsonml[ 0 ] === "object" && !( jsonml[ 0 ] instanceof Array ) ) {
      jsonml.shift();
    }

    while ( jsonml.length ) {
      content.push( render_tree( jsonml.shift() ) );
    }
  }

  return content.join( "\n\n" );
};
|
1510 |
|
|
1511 |
// Escape the five HTML-significant characters. The ampersand pass runs
// first so the entities produced by the later passes are not
// double-escaped.
function escapeHTML( text ) {
  var replacements = [
    [ /&/g, "&amp;" ],
    [ /</g, "&lt;" ],
    [ />/g, "&gt;" ],
    [ /"/g, "&quot;" ],
    [ /'/g, "&#39;" ]
  ];
  for ( var i = 0; i < replacements.length; i++ ) {
    text = text.replace( replacements[ i ][ 0 ], replacements[ i ][ 1 ] );
  }
  return text;
}
|
1518 |
|
|
1519 |
// Render a single JsonML node (string or element array) to an HTML
// string. NOTE: consumes the array it is given (uses shift).
function render_tree( jsonml ) {
  // Text node: just escape it.
  if ( typeof jsonml === "string" ) {
    return escapeHTML( jsonml );
  }

  var tag = jsonml.shift();
  var attributes = {};
  var content = [];

  // A non-array object following the tag is the attribute node.
  if ( jsonml.length && typeof jsonml[ 0 ] === "object" && !( jsonml[ 0 ] instanceof Array ) ) {
    attributes = jsonml.shift();
  }

  while ( jsonml.length ) {
    content.push( render_tree( jsonml.shift() ) );
  }

  var tag_attrs = "";
  for ( var a in attributes ) {
    tag_attrs += " " + a + '="' + escapeHTML( attributes[ a ] ) + '"';
  }

  // Void elements self-close; be careful about adding whitespace here
  // — these are inline elements.
  if ( tag == "img" || tag == "br" || tag == "hr" ) {
    return "<"+ tag + tag_attrs + "/>";
  }
  return "<"+ tag + tag_attrs + ">" + content.join( "" ) + "</" + tag + ">";
}
|
1550 |
|
|
1551 |
// Convert one markdown JsonML tree node into its HTML JsonML
// equivalent, resolving link/image references against `references`.
// Works on a shallow clone, so the caller's tree is not modified.
function convert_tree_to_html( tree, references, options ) {
  var i;
  options = options || {};

  // Shallow clone.
  var jsonml = tree.slice( 0 );

  if ( typeof options.preprocessTreeNode === "function" ) {
    jsonml = options.preprocessTreeNode( jsonml, references );
  }

  // Clone the attribute node, if any, so edits below don't leak back
  // into the source tree.
  var attrs = extract_attr( jsonml );
  if ( attrs ) {
    jsonml[ 1 ] = {};
    for ( i in attrs ) {
      jsonml[ 1 ][ i ] = attrs[ i ];
    }
    attrs = jsonml[ 1 ];
  }

  // Text nodes pass through untouched.
  if ( typeof jsonml === "string" ) {
    return jsonml;
  }

  // Map this node's markdown tag to its HTML tag.
  switch ( jsonml[ 0 ] ) {
  case "header":
    jsonml[ 0 ] = "h" + jsonml[ 1 ].level;
    delete jsonml[ 1 ].level;
    break;
  case "bulletlist":
    jsonml[ 0 ] = "ul";
    break;
  case "numberlist":
    jsonml[ 0 ] = "ol";
    break;
  case "listitem":
    jsonml[ 0 ] = "li";
    break;
  case "para":
    jsonml[ 0 ] = "p";
    break;
  case "markdown":
    jsonml[ 0 ] = "html";
    if ( attrs ) delete attrs.references;
    break;
  case "code_block":
    // Becomes <pre><code>...</code></pre>.
    jsonml[ 0 ] = "pre";
    i = attrs ? 2 : 1;
    var code = [ "code" ];
    code.push.apply( code, jsonml.splice( i, jsonml.length - i ) );
    jsonml[ i ] = code;
    break;
  case "inlinecode":
    jsonml[ 0 ] = "code";
    break;
  case "img":
    jsonml[ 1 ].src = jsonml[ 1 ].href;
    delete jsonml[ 1 ].href;
    break;
  case "linebreak":
    jsonml[ 0 ] = "br";
    break;
  case "link":
    jsonml[ 0 ] = "a";
    break;
  case "link_ref":
    jsonml[ 0 ] = "a";

    // Resolve the reference; if it was never defined, revert to the
    // stored original text.
    var link_ref = references[ attrs.ref ];
    if ( !link_ref ) {
      return attrs.original;
    }

    delete attrs.ref;
    attrs.href = link_ref.href;
    if ( link_ref.title ) {
      attrs.title = link_ref.title;
    }
    delete attrs.original;
    break;
  case "img_ref":
    jsonml[ 0 ] = "img";

    // Same resolution as link_ref, but the target attribute is `src`.
    var img_ref = references[ attrs.ref ];
    if ( !img_ref ) {
      return attrs.original;
    }

    delete attrs.ref;
    attrs.src = img_ref.href;
    if ( img_ref.title ) {
      attrs.title = img_ref.title;
    }
    delete attrs.original;
    break;
  }

  // Convert all the children, skipping the attribute node when it has
  // keys and removing it when it is empty.
  i = 1;
  if ( attrs ) {
    for ( var key in jsonml[ 1 ] ) {
      i = 2;
      break;
    }
    if ( i === 1 ) {
      jsonml.splice( i, 1 );
    }
  }

  for ( ; i < jsonml.length; ++i ) {
    jsonml[ i ] = convert_tree_to_html( jsonml[ i ], references, options );
  }

  return jsonml;
}
|
1691 |
|
|
1692 |
|
|
1693 |
// merges adjacent text nodes into a single node |
|
1694 |
// Merge runs of adjacent string children into single strings,
// recursing into element children. Mutates jsonml in place.
function merge_text_nodes( jsonml ) {
  // Start past the tag name, and past the attribute hash if present.
  var i = extract_attr( jsonml ) ? 2 : 1;

  while ( i < jsonml.length ) {
    if ( typeof jsonml[ i ] !== "string" ) {
      // Element child: recurse, then move on.
      merge_text_nodes( jsonml[ i ] );
      ++i;
    }
    else if ( i + 1 < jsonml.length && typeof jsonml[ i + 1 ] === "string" ) {
      // Fold the following string into this one and remove it; stay on
      // the same index in case yet another string follows.
      jsonml[ i ] += jsonml.splice( i + 1, 1 )[ 0 ];
    }
    else {
      ++i;
    }
  }
}
|
1716 |
|
|
1717 |
} )( (function() { |
|
1718 |
if ( typeof exports === "undefined" ) { |
|
1719 |
window.markdown = {}; |
|
1720 |
return window.markdown; |
|
1721 |
} |
|
1722 |
else { |
|
1723 |
return exports; |
|
1724 |
} |
|
1725 |
} )() ); |