From 3306203fe93c7db3adb843be194f6eec28dbaa01 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?=
diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go
deleted file mode 100644
--- a/vendor/github.com/russross/blackfriday/block.go
+++ /dev/null
- // check for an <hr> tag
- if size := p.htmlHr(out, data, doRender); size > 0 {
- return size
- }
-
- // check for HTML CDATA
- if size := p.htmlCDATA(out, data, doRender); size > 0 {
- return size
- }
-
- // no special case recognized
- return 0
- }
-
- // look for an unindented matching closing tag
- // followed by a blank line
- found := false
- /*
- closetag := []byte("\n</" + curtag + ">")
- j = len(curtag) + 1
- for !found {
- // scan for a closing tag at the beginning of a line
- if skip := bytes.Index(data[j:], closetag); skip >= 0 {
- j += skip + len(closetag)
- } else {
- break
- }
-
- // see if it is the only thing on the line
- if skip := p.isEmpty(data[j:]); skip > 0 {
- // see if it is followed by a blank line/eof
- j += skip
- if j >= len(data) {
- found = true
- i = j
- } else {
- if skip := p.isEmpty(data[j:]); skip > 0 {
- j += skip
- found = true
- i = j
- }
- }
- }
- }
- */
-
- // if not found, try a second pass looking for indented match
- // but not if tag is "ins" or "del" (following original Markdown.pl)
- if !found && curtag != "ins" && curtag != "del" {
- i = 1
- for i < len(data) {
- i++
- for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
- i++
- }
-
- if i+2+len(curtag) >= len(data) {
- break
- }
-
- j = p.htmlFindEnd(curtag, data[i-1:])
-
- if j > 0 {
- i += j - 1
- found = true
- break
- }
- }
- }
-
- if !found {
- return 0
- }
-
- // the end of the block has been found
- if doRender {
- // trim newlines
- end := i
- for end > 0 && data[end-1] == '\n' {
- end--
- }
- p.r.BlockHtml(out, data[:end])
- }
-
- return i
-}
-
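-// renderHTMLBlock renders data as a raw HTML block when a blank line begins at offset start,
-// returning the total number of bytes consumed, or 0 if no blank line is found there.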
-func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int {
- // html block needs to end with a blank line
- if i := p.isEmpty(data[start:]); i > 0 {
- size := start + i
- if doRender {
- // trim trailing newlines
- end := size
- for end > 0 && data[end-1] == '\n' {
- end--
- }
- p.r.BlockHtml(out, data[:end])
- }
- return size
- }
- return 0
-}
-
-// HTML comment, lax form
-func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
- i := p.inlineHTMLComment(out, data)
- return p.renderHTMLBlock(out, data, i, doRender)
-}
-
-// HTML CDATA section
-func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int {
- const cdataTag = "<![cdata["
- const cdataTagLen = len(cdataTag)
- if len(data) < cdataTagLen+1 {
- return 0
- }
- if !bytes.Equal(bytes.ToLower(data[:cdataTagLen]), []byte(cdataTag)) {
- return 0
- }
- i := cdataTagLen
- // scan for an end-of-CDATA marker, across lines if necessary
- for i < len(data) && !(data[i-2] == ']' && data[i-1] == ']' && data[i] == '>') {
- i++
- }
- i++
- // no end-of-comment marker
- if i >= len(data) {
- return 0
- }
- return p.renderHTMLBlock(out, data, i, doRender)
-}
-
-// HR, which is the only self-closing block tag considered
-func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
- if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
- return 0
- }
- if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
- // not an <hr> tag after all; at least not a valid one
- return 0
- }
-
- i := 3
- for data[i] != '>' && data[i] != '\n' {
- i++
- }
-
- if data[i] == '>' {
- return p.renderHTMLBlock(out, data, i+1, doRender)
- }
-
- return 0
-}
-
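-// htmlFindTag reads the tag name at the start of data and reports whether it is a recognized block-level tag.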
-func (p *parser) htmlFindTag(data []byte) (string, bool) {
- i := 0
- for isalnum(data[i]) {
- i++
- }
- key := string(data[:i])
- if _, ok := blockTags[key]; ok {
- return key, true
- }
- return "", false
-}
-
-func (p *parser) htmlFindEnd(tag string, data []byte) int {
- // assume data[0] == '<' && data[1] == '/' already tested
-
- // check if tag is a match
- closetag := []byte("</" + tag + ">")
- if !bytes.HasPrefix(data, closetag) {
- return 0
- }
- i := len(closetag)
-
- // check that the rest of the line is blank
- skip := 0
- if skip = p.isEmpty(data[i:]); skip == 0 {
- return 0
- }
- i += skip
- skip = 0
-
- if i >= len(data) {
- return i
- }
-
- if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
- return i
- }
- if skip = p.isEmpty(data[i:]); skip == 0 {
- // following line must be blank
- return 0
- }
-
- return i + skip
-}
-
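-// isEmpty returns the length of the initial blank line in data, including its newline,
-// or 0 if the first line contains anything other than spaces and tabs.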
-func (*parser) isEmpty(data []byte) int {
- // it is okay to call isEmpty on an empty buffer
- if len(data) == 0 {
- return 0
- }
-
- var i int
- for i = 0; i < len(data) && data[i] != '\n'; i++ {
- if data[i] != ' ' && data[i] != '\t' {
- return 0
- }
- }
- return i + 1
-}
-
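-// isHRule reports whether data begins with a horizontal rule: at least three '*', '-' or '_'
-// characters on a line of their own, with only spaces in between.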
-func (*parser) isHRule(data []byte) bool {
- i := 0
-
- // skip up to three spaces
- for i < 3 && data[i] == ' ' {
- i++
- }
-
- // look at the hrule char
- if data[i] != '*' && data[i] != '-' && data[i] != '_' {
- return false
- }
- c := data[i]
-
- // the whole line must be the char or whitespace
- n := 0
- for data[i] != '\n' {
- switch {
- case data[i] == c:
- n++
- case data[i] != ' ':
- return false
- }
- i++
- }
-
- return n >= 3
-}
-
-// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
-// and returns the end index if so, or 0 otherwise. It also returns the marker found.
- // If info is not nil, it gets set to the info string specified in the fence line.
-// A final newline is mandatory to recognize the fence line, unless newlineOptional is true.
-func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bool) (end int, marker string) {
- i, size := 0, 0
-
- // skip up to three spaces
- for i < len(data) && i < 3 && data[i] == ' ' {
- i++
- }
-
- // check for the marker characters: ~ or `
- if i >= len(data) {
- return 0, ""
- }
- if data[i] != '~' && data[i] != '`' {
- return 0, ""
- }
-
- c := data[i]
-
- // the whole line must be the same char or whitespace
- for i < len(data) && data[i] == c {
- size++
- i++
- }
-
- // the marker char must occur at least 3 times
- if size < 3 {
- return 0, ""
- }
- marker = string(data[i-size : i])
-
- // if this is the end marker, it must match the beginning marker
- if oldmarker != "" && marker != oldmarker {
- return 0, ""
- }
-
- // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
- // into one, always get the info string, and discard it if the caller doesn't care.
- if info != nil {
- infoLength := 0
- i = skipChar(data, i, ' ')
-
- if i >= len(data) {
- if newlineOptional && i == len(data) {
- return i, marker
- }
- return 0, ""
- }
-
- infoStart := i
-
- if data[i] == '{' {
- i++
- infoStart++
-
- for i < len(data) && data[i] != '}' && data[i] != '\n' {
- infoLength++
- i++
- }
-
- if i >= len(data) || data[i] != '}' {
- return 0, ""
- }
-
- // strip all whitespace at the beginning and the end
- // of the {} block
- for infoLength > 0 && isspace(data[infoStart]) {
- infoStart++
- infoLength--
- }
-
- for infoLength > 0 && isspace(data[infoStart+infoLength-1]) {
- infoLength--
- }
-
- i++
- } else {
- for i < len(data) && !isverticalspace(data[i]) {
- infoLength++
- i++
- }
- }
-
- *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength]))
- }
-
- i = skipChar(data, i, ' ')
- if i >= len(data) {
- if newlineOptional {
- return i, marker
- }
- return 0, ""
- }
- if data[i] == '\n' {
- i++ // Take newline into account
- }
-
- return i, marker
-}
-
-// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
-// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
-// If doRender is true, a final newline is mandatory to recognize the fenced code block.
-func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int {
- var infoString string
- beg, marker := isFenceLine(data, &infoString, "", false)
- if beg == 0 || beg >= len(data) {
- return 0
- }
-
- var work bytes.Buffer
-
- for {
- // safe to assume beg < len(data)
-
- // check for the end of the code block
- newlineOptional := !doRender
- fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional)
- if fenceEnd != 0 {
- beg += fenceEnd
- break
- }
-
- // copy the current line
- end := skipUntilChar(data, beg, '\n') + 1
-
- // did we reach the end of the buffer without a closing marker?
- if end >= len(data) {
- return 0
- }
-
- // verbatim copy to the working buffer
- if doRender {
- work.Write(data[beg:end])
- }
- beg = end
- }
-
- if doRender {
- p.r.BlockCode(out, work.Bytes(), infoString)
- }
-
- return beg
-}
-
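-// table parses a table block (header row, delimiter row and body rows) and returns the
-// number of bytes consumed, or 0 if data does not start with a table.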
-func (p *parser) table(out *bytes.Buffer, data []byte) int {
- var header bytes.Buffer
- i, columns := p.tableHeader(&header, data)
- if i == 0 {
- return 0
- }
-
- var body bytes.Buffer
-
- for i < len(data) {
- pipes, rowStart := 0, i
- for ; data[i] != '\n'; i++ {
- if data[i] == '|' {
- pipes++
- }
- }
-
- if pipes == 0 {
- i = rowStart
- break
- }
-
- // include the newline in data sent to tableRow
- i++
- p.tableRow(&body, data[rowStart:i], columns, false)
- }
-
- p.r.Table(out, header.Bytes(), body.Bytes(), columns)
-
- return i
-}
-
-// check if the specified position is preceded by an odd number of backslashes
-func isBackslashEscaped(data []byte, i int) bool {
- backslashes := 0
- for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
- backslashes++
- }
- return backslashes&1 == 1
-}
-
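-// tableHeader parses the header row and the delimiter row beneath it, returning the bytes
-// consumed and the alignment flags for each column.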
-func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) {
- i := 0
- colCount := 1
- for i = 0; data[i] != '\n'; i++ {
- if data[i] == '|' && !isBackslashEscaped(data, i) {
- colCount++
- }
- }
-
- // doesn't look like a table header
- if colCount == 1 {
- return
- }
-
- // include the newline in the data sent to tableRow
- header := data[:i+1]
-
- // column count ignores pipes at beginning or end of line
- if data[0] == '|' {
- colCount--
- }
- if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
- colCount--
- }
-
- columns = make([]int, colCount)
-
- // move on to the header underline
- i++
- if i >= len(data) {
- return
- }
-
- if data[i] == '|' && !isBackslashEscaped(data, i) {
- i++
- }
- i = skipChar(data, i, ' ')
-
- // each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
- // and trailing | optional on last column
- col := 0
- for data[i] != '\n' {
- dashes := 0
-
- if data[i] == ':' {
- i++
- columns[col] |= TABLE_ALIGNMENT_LEFT
- dashes++
- }
- for data[i] == '-' {
- i++
- dashes++
- }
- if data[i] == ':' {
- i++
- columns[col] |= TABLE_ALIGNMENT_RIGHT
- dashes++
- }
- for data[i] == ' ' {
- i++
- }
-
- // end of column test is messy
- switch {
- case dashes < 3:
- // not a valid column
- return
-
- case data[i] == '|' && !isBackslashEscaped(data, i):
- // marker found, now skip past trailing whitespace
- col++
- i++
- for data[i] == ' ' {
- i++
- }
-
- // trailing junk found after last column
- if col >= colCount && data[i] != '\n' {
- return
- }
-
- case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
- // something else found where marker was required
- return
-
- case data[i] == '\n':
- // marker is optional for the last column
- col++
-
- default:
- // trailing junk found after last column
- return
- }
- }
- if col != colCount {
- return
- }
-
- p.tableRow(out, header, columns, true)
- size = i + 1
- return
-}
-
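-// tableRow splits one row into cells, renders each cell (padding out missing columns),
-// and emits the assembled row through the renderer.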
-func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) {
- i, col := 0, 0
- var rowWork bytes.Buffer
-
- if data[i] == '|' && !isBackslashEscaped(data, i) {
- i++
- }
-
- for col = 0; col < len(columns) && i < len(data); col++ {
- for data[i] == ' ' {
- i++
- }
-
- cellStart := i
-
- for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
- i++
- }
-
- cellEnd := i
-
- // skip the end-of-cell marker, possibly taking us past end of buffer
- i++
-
- for cellEnd > cellStart && data[cellEnd-1] == ' ' {
- cellEnd--
- }
-
- var cellWork bytes.Buffer
- p.inline(&cellWork, data[cellStart:cellEnd])
-
- if header {
- p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col])
- } else {
- p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col])
- }
- }
-
- // pad it out with empty columns to get the right number
- for ; col < len(columns); col++ {
- if header {
- p.r.TableHeaderCell(&rowWork, nil, columns[col])
- } else {
- p.r.TableCell(&rowWork, nil, columns[col])
- }
- }
-
- // silently ignore rows with too many cells
-
- p.r.TableRow(out, rowWork.Bytes())
-}
-
-// returns blockquote prefix length
-func (p *parser) quotePrefix(data []byte) int {
- i := 0
- for i < 3 && data[i] == ' ' {
- i++
- }
- if data[i] == '>' {
- if data[i+1] == ' ' {
- return i + 2
- }
- return i + 1
- }
- return 0
-}
-
-// blockquote ends with at least one blank line
-// followed by something without a blockquote prefix
-func (p *parser) terminateBlockquote(data []byte, beg, end int) bool {
- if p.isEmpty(data[beg:]) <= 0 {
- return false
- }
- if end >= len(data) {
- return true
- }
- return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
-}
-
-// parse a blockquote fragment
-func (p *parser) quote(out *bytes.Buffer, data []byte) int {
- var raw bytes.Buffer
- beg, end := 0, 0
- for beg < len(data) {
- end = beg
- // Step over whole lines, collecting them. While doing that, check for
- // fenced code and if one's found, incorporate it altogether,
- // regardless of any contents inside it
- for data[end] != '\n' {
- if p.flags&EXTENSION_FENCED_CODE != 0 {
- if i := p.fencedCodeBlock(out, data[end:], false); i > 0 {
- // -1 to compensate for the extra end++ after the loop:
- end += i - 1
- break
- }
- }
- end++
- }
- end++
-
- if pre := p.quotePrefix(data[beg:]); pre > 0 {
- // skip the prefix
- beg += pre
- } else if p.terminateBlockquote(data, beg, end) {
- break
- }
-
- // this line is part of the blockquote
- raw.Write(data[beg:end])
- beg = end
- }
-
- var cooked bytes.Buffer
- p.block(&cooked, raw.Bytes())
- p.r.BlockQuote(out, cooked.Bytes())
- return end
-}
-
-// returns prefix length for block code
-func (p *parser) codePrefix(data []byte) int {
- if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
- return 4
- }
- return 0
-}
-
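-// code parses a block of code indented by four spaces and returns the number of bytes consumed.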
-func (p *parser) code(out *bytes.Buffer, data []byte) int {
- var work bytes.Buffer
-
- i := 0
- for i < len(data) {
- beg := i
- for data[i] != '\n' {
- i++
- }
- i++
-
- blankline := p.isEmpty(data[beg:i]) > 0
- if pre := p.codePrefix(data[beg:i]); pre > 0 {
- beg += pre
- } else if !blankline {
- // non-empty, non-prefixed line breaks the pre
- i = beg
- break
- }
-
- // verbatim copy to the working buffer
- if blankline {
- work.WriteByte('\n')
- } else {
- work.Write(data[beg:i])
- }
- }
-
- // trim all the \n off the end of work
- workbytes := work.Bytes()
- eol := len(workbytes)
- for eol > 0 && workbytes[eol-1] == '\n' {
- eol--
- }
- if eol != len(workbytes) {
- work.Truncate(eol)
- }
-
- work.WriteByte('\n')
-
- p.r.BlockCode(out, work.Bytes(), "")
-
- return i
-}
-
-// returns unordered list item prefix
-func (p *parser) uliPrefix(data []byte) int {
- i := 0
-
- // start with up to 3 spaces
- for i < 3 && data[i] == ' ' {
- i++
- }
-
- // need a *, +, or - followed by a space
- if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
- data[i+1] != ' ' {
- return 0
- }
- return i + 2
-}
-
-// returns ordered list item prefix
-func (p *parser) oliPrefix(data []byte) int {
- i := 0
-
- // start with up to 3 spaces
- for i < 3 && data[i] == ' ' {
- i++
- }
-
- // count the digits
- start := i
- for data[i] >= '0' && data[i] <= '9' {
- i++
- }
-
- // we need >= 1 digits followed by a dot and a space
- if start == i || data[i] != '.' || data[i+1] != ' ' {
- return 0
- }
- return i + 2
-}
-
-// returns definition list item prefix
-func (p *parser) dliPrefix(data []byte) int {
- i := 0
-
- // need a : followed by a space
- if data[i] != ':' || data[i+1] != ' ' {
- return 0
- }
- for data[i] == ' ' {
- i++
- }
- return i + 2
-}
-
-// parse ordered or unordered list block
-func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
- i := 0
- flags |= LIST_ITEM_BEGINNING_OF_LIST
- work := func() bool {
- for i < len(data) {
- skip := p.listItem(out, data[i:], &flags)
- i += skip
-
- if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
- break
- }
- flags &= ^LIST_ITEM_BEGINNING_OF_LIST
- }
- return true
- }
-
- p.r.List(out, work, flags)
- return i
-}
-
-// Parse a single list item.
-// Assumes initial prefix is already removed if this is a sublist.
-func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {
- // keep track of the indentation of the first line
- itemIndent := 0
- for itemIndent < 3 && data[itemIndent] == ' ' {
- itemIndent++
- }
-
- i := p.uliPrefix(data)
- if i == 0 {
- i = p.oliPrefix(data)
- }
- if i == 0 {
- i = p.dliPrefix(data)
- // reset definition term flag
- if i > 0 {
- *flags &= ^LIST_TYPE_TERM
- }
- }
- if i == 0 {
- // if in definition list, set term flag and continue
- if *flags&LIST_TYPE_DEFINITION != 0 {
- *flags |= LIST_TYPE_TERM
- } else {
- return 0
- }
- }
-
- // skip leading whitespace on first line
- for data[i] == ' ' {
- i++
- }
-
- // find the end of the line
- line := i
- for i > 0 && data[i-1] != '\n' {
- i++
- }
-
- // process the following lines
- containsBlankLine := false
- sublist := 0
- codeBlockMarker := ""
- if p.flags&EXTENSION_FENCED_CODE != 0 && i > line {
- // determine if codeblock starts on the first line
- _, codeBlockMarker = isFenceLine(data[line:i], nil, "", false)
- }
-
- // get working buffer
- var raw bytes.Buffer
-
- // put the first line into the working buffer
- raw.Write(data[line:i])
- line = i
-
-gatherlines:
- for line < len(data) {
- i++
-
- // find the end of this line
- for data[i-1] != '\n' {
- i++
- }
- // if it is an empty line, guess that it is part of this item
- // and move on to the next line
- if p.isEmpty(data[line:i]) > 0 {
- containsBlankLine = true
- raw.Write(data[line:i])
- line = i
- continue
- }
-
- // calculate the indentation
- indent := 0
- for indent < 4 && line+indent < i && data[line+indent] == ' ' {
- indent++
- }
-
- chunk := data[line+indent : i]
-
- if p.flags&EXTENSION_FENCED_CODE != 0 {
- // determine if in or out of codeblock
- // if in codeblock, ignore normal list processing
- _, marker := isFenceLine(chunk, nil, codeBlockMarker, false)
- if marker != "" {
- if codeBlockMarker == "" {
- // start of codeblock
- codeBlockMarker = marker
- } else {
- // end of codeblock.
- *flags |= LIST_ITEM_CONTAINS_BLOCK
- codeBlockMarker = ""
- }
- }
- // we are in a codeblock, write line, and continue
- if codeBlockMarker != "" || marker != "" {
- raw.Write(data[line+indent : i])
- line = i
- continue gatherlines
- }
- }
-
- // evaluate how this line fits in
- switch {
- // is this a nested list item?
- case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
- p.oliPrefix(chunk) > 0 ||
- p.dliPrefix(chunk) > 0:
-
- if containsBlankLine {
- // end the list if the type changed after a blank line
- if indent <= itemIndent &&
- ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) ||
- (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) {
-
- *flags |= LIST_ITEM_END_OF_LIST
- break gatherlines
- }
- *flags |= LIST_ITEM_CONTAINS_BLOCK
- }
-
- // to be a nested list, it must be indented more
- // if not, it is the next item in the same list
- if indent <= itemIndent {
- break gatherlines
- }
-
- // is this the first item in the nested list?
- if sublist == 0 {
- sublist = raw.Len()
- }
-
- // is this a nested prefix header?
- case p.isPrefixHeader(chunk):
- // if the header is not indented, it is not nested in the list
- // and thus ends the list
- if containsBlankLine && indent < 4 {
- *flags |= LIST_ITEM_END_OF_LIST
- break gatherlines
- }
- *flags |= LIST_ITEM_CONTAINS_BLOCK
-
- // anything following an empty line is only part
- // of this item if it is indented 4 spaces
- // (regardless of the indentation of the beginning of the item)
- case containsBlankLine && indent < 4:
- if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 {
- // is the next item still a part of this list?
- next := i
- for data[next] != '\n' {
- next++
- }
- for next < len(data)-1 && data[next] == '\n' {
- next++
- }
- if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
- *flags |= LIST_ITEM_END_OF_LIST
- }
- } else {
- *flags |= LIST_ITEM_END_OF_LIST
- }
- break gatherlines
-
- // a blank line means this should be parsed as a block
- case containsBlankLine:
- *flags |= LIST_ITEM_CONTAINS_BLOCK
- }
-
- containsBlankLine = false
-
- // add the line into the working buffer without prefix
- raw.Write(data[line+indent : i])
-
- line = i
- }
-
- // If reached end of data, the Renderer.ListItem call we're going to make below
- // is definitely the last in the list.
- if line >= len(data) {
- *flags |= LIST_ITEM_END_OF_LIST
- }
-
- rawBytes := raw.Bytes()
-
- // render the contents of the list item
- var cooked bytes.Buffer
- if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 {
- // intermediate render of block item, except for definition term
- if sublist > 0 {
- p.block(&cooked, rawBytes[:sublist])
- p.block(&cooked, rawBytes[sublist:])
- } else {
- p.block(&cooked, rawBytes)
- }
- } else {
- // intermediate render of inline item
- if sublist > 0 {
- p.inline(&cooked, rawBytes[:sublist])
- p.block(&cooked, rawBytes[sublist:])
- } else {
- p.inline(&cooked, rawBytes)
- }
- }
-
- // render the actual list item
- cookedBytes := cooked.Bytes()
- parsedEnd := len(cookedBytes)
-
- // strip trailing newlines
- for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' {
- parsedEnd--
- }
- p.r.ListItem(out, cookedBytes[:parsedEnd], *flags)
-
- return line
-}
-
-// render a single paragraph that has already been parsed out
-func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) {
- if len(data) == 0 {
- return
- }
-
- // trim leading spaces
- beg := 0
- for data[beg] == ' ' {
- beg++
- }
-
- // trim trailing newline
- end := len(data) - 1
-
- // trim trailing spaces
- for end > beg && data[end-1] == ' ' {
- end--
- }
-
- work := func() bool {
- p.inline(out, data[beg:end])
- return true
- }
- p.r.Paragraph(out, work)
-}
-
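-// paragraph consumes a paragraph of text, stopping at a blank line or at the start of another
-// block element, and returns the number of bytes consumed.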
-func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
- // prev: index of 1st char of previous line
- // line: index of 1st char of current line
- // i: index of cursor/end of current line
- var prev, line, i int
-
- // keep going until we find something to mark the end of the paragraph
- for i < len(data) {
- // mark the beginning of the current line
- prev = line
- current := data[i:]
- line = i
-
- // did we find a blank line marking the end of the paragraph?
- if n := p.isEmpty(current); n > 0 {
- // was this blank line followed by a definition list item?
- if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
- if i < len(data)-1 && data[i+1] == ':' {
- return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
- }
- }
-
- p.renderParagraph(out, data[:i])
- return i + n
- }
-
- // an underline under some text marks a header, so our paragraph ended on prev line
- if i > 0 {
- if level := p.isUnderlinedHeader(current); level > 0 {
- // render the paragraph
- p.renderParagraph(out, data[:prev])
-
- // ignore leading and trailing whitespace
- eol := i - 1
- for prev < eol && data[prev] == ' ' {
- prev++
- }
- for eol > prev && data[eol-1] == ' ' {
- eol--
- }
-
- // render the header
- // this ugly double closure avoids forcing variables onto the heap
- work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool {
- return func() bool {
- pp.inline(o, d)
- return true
- }
- }(out, p, data[prev:eol])
-
- id := ""
- if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
- id = SanitizedAnchorName(string(data[prev:eol]))
- }
-
- p.r.Header(out, work, level, id)
-
- // find the end of the underline
- for data[i] != '\n' {
- i++
- }
- return i
- }
- }
-
- // if the next line starts a block of HTML, then the paragraph ends here
- if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
- if data[i] == '<' && p.html(out, current, false) > 0 {
- // rewind to before the HTML block
- p.renderParagraph(out, data[:i])
- return i
- }
- }
-
- // if there's a prefixed header or a horizontal rule after this, paragraph is over
- if p.isPrefixHeader(current) || p.isHRule(current) {
- p.renderParagraph(out, data[:i])
- return i
- }
-
- // if there's a fenced code block, paragraph is over
- if p.flags&EXTENSION_FENCED_CODE != 0 {
- if p.fencedCodeBlock(out, current, false) > 0 {
- p.renderParagraph(out, data[:i])
- return i
- }
- }
-
- // if there's a definition list item, prev line is a definition term
- if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
- if p.dliPrefix(current) != 0 {
- return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
- }
- }
-
- // if there's a list after this, paragraph is over
- if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 {
- if p.uliPrefix(current) != 0 ||
- p.oliPrefix(current) != 0 ||
- p.quotePrefix(current) != 0 ||
- p.codePrefix(current) != 0 {
- p.renderParagraph(out, data[:i])
- return i
- }
- }
-
- // otherwise, scan to the beginning of the next line
- for data[i] != '\n' {
- i++
- }
- i++
- }
-
- p.renderParagraph(out, data[:i])
- return i
-}
-
-// SanitizedAnchorName returns a sanitized anchor name for the given text.
-//
-// It implements the algorithm specified in the package comment.
-func SanitizedAnchorName(text string) string {
- var anchorName []rune
- futureDash := false
- for _, r := range text {
- switch {
- case unicode.IsLetter(r) || unicode.IsNumber(r):
- if futureDash && len(anchorName) > 0 {
- anchorName = append(anchorName, '-')
- }
- futureDash = false
- anchorName = append(anchorName, unicode.ToLower(r))
- default:
- futureDash = true
- }
- }
- return string(anchorName)
-}
diff --git a/vendor/github.com/russross/blackfriday/block_test.go b/vendor/github.com/russross/blackfriday/block_test.go
deleted file mode 100644
index f2d998df..00000000
--- a/vendor/github.com/russross/blackfriday/block_test.go
+++ /dev/null
@@ -1,1856 +0,0 @@
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross Header 1
\n",
-
- "## Header 2\n",
- "Header 2
\n",
-
- "### Header 3\n",
- "Header 3
\n",
-
- "#### Header 4\n",
- "Header 4
\n",
-
- "##### Header 5\n",
- "Header 5
\n",
-
- "###### Header 6\n",
- "Header 6
\n",
-
- "####### Header 7\n",
- "# Header 7
\n",
-
- "#Header 1\n",
- "Header 1
\n",
-
- "##Header 2\n",
- "Header 2
\n",
-
- "###Header 3\n",
- "Header 3
\n",
-
- "####Header 4\n",
- "Header 4
\n",
-
- "#####Header 5\n",
- "Header 5
\n",
-
- "######Header 6\n",
- "Header 6
\n",
-
- "#######Header 7\n",
- "#Header 7
\n",
-
- "Hello\n# Header 1\nGoodbye\n",
- "Header 1
\n\n\n
\n",
-
- "* List\n#Header\n* List\n",
- "Header
\n
\n",
-
- "* List\n * Nested list\n # Nested header\n",
- "Header
\n
\n",
-
- "#Header 1 \\#\n",
- "\n
Nested header
Header 1 #
\n",
-
- "#Header 1 \\# foo\n",
- "Header 1 # foo
\n",
-
- "#Header 1 #\\##\n",
- "Header 1 ##
\n",
- }
- doTestsBlock(t, tests, 0)
-}
-
-func TestPrefixHeaderSpaceExtension(t *testing.T) {
- var tests = []string{
- "# Header 1\n",
- "Header 1
\n",
-
- "## Header 2\n",
- "Header 2
\n",
-
- "### Header 3\n",
- "Header 3
\n",
-
- "#### Header 4\n",
- "Header 4
\n",
-
- "##### Header 5\n",
- "Header 5
\n",
-
- "###### Header 6\n",
- "Header 6
\n",
-
- "####### Header 7\n",
- "Header 1
\n\n\n
\n",
-
- "* List\n#Header\n* List\n",
- "Header
\n
\n",
-
- "* List\n * Nested list\n # Nested header\n",
- "\n
\n",
- }
- doTestsBlock(t, tests, EXTENSION_SPACE_HEADERS)
-}
-
-func TestPrefixHeaderIdExtension(t *testing.T) {
- var tests = []string{
- "# Header 1 {#someid}\n",
- "\n
Nested header
Header 1
\n",
-
- "# Header 1 {#someid} \n",
- "Header 1
\n",
-
- "# Header 1 {#someid}\n",
- "Header 1
\n",
-
- "# Header 1 {#someid\n",
- "Header 1 {#someid
\n",
-
- "# Header 1 {#someid\n",
- "Header 1 {#someid
\n",
-
- "# Header 1 {#someid}}\n",
- "Header 1
\n\nHeader 2
\n",
-
- "### Header 3 {#someid}\n",
- "Header 3
\n",
-
- "#### Header 4 {#someid}\n",
- "Header 4
\n",
-
- "##### Header 5 {#someid}\n",
- "Header 5
\n",
-
- "###### Header 6 {#someid}\n",
- "Header 6
\n",
-
- "####### Header 7 {#someid}\n",
- "# Header 7
\n",
-
- "# Header 1 # {#someid}\n",
- "Header 1
\n",
-
- "## Header 2 ## {#someid}\n",
- "Header 2
\n",
-
- "Hello\n# Header 1\nGoodbye\n",
- "Header 1
\n\n\n
\n",
-
- "* List\n#Header {#someid}\n* List\n",
- "Header
\n
\n",
-
- "* List\n * Nested list\n # Nested header {#someid}\n",
- "Header
\n
\n",
- }
- doTestsBlock(t, tests, EXTENSION_HEADER_IDS)
-}
-
-func TestPrefixHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) {
- var tests = []string{
- "# header 1 {#someid}\n",
- "\n
Nested header
header 1
\n",
-
- "## header 2 {#someid}\n",
- "header 2
\n",
-
- "### header 3 {#someid}\n",
- "header 3
\n",
-
- "#### header 4 {#someid}\n",
- "header 4
\n",
-
- "##### header 5 {#someid}\n",
- "header 5
\n",
-
- "###### header 6 {#someid}\n",
- "header 6
\n",
-
- "####### header 7 {#someid}\n",
- "# header 7
\n",
-
- "# header 1 # {#someid}\n",
- "header 1
\n",
-
- "## header 2 ## {#someid}\n",
- "header 2
\n",
-
- "* List\n# Header {#someid}\n* List\n",
- "\n
\n",
-
- "* List\n#Header {#someid}\n* List\n",
- "Header
\n
\n",
-
- "* List\n * Nested list\n # Nested header {#someid}\n",
- "Header
\n
\n",
- }
-
- parameters := HtmlRendererParameters{
- HeaderIDPrefix: "PRE:",
- HeaderIDSuffix: ":POST",
- }
-
- doTestsBlockWithRunner(t, tests, EXTENSION_HEADER_IDS, runnerWithRendererParameters(parameters))
-}
-
-func TestPrefixAutoHeaderIdExtension(t *testing.T) {
- var tests = []string{
- "# Header 1\n",
- "\n
Nested header
Header 1
\n",
-
- "# Header 1 \n",
- "Header 1
\n",
-
- "## Header 2\n",
- "Header 2
\n",
-
- "### Header 3\n",
- "Header 3
\n",
-
- "#### Header 4\n",
- "Header 4
\n",
-
- "##### Header 5\n",
- "Header 5
\n",
-
- "###### Header 6\n",
- "Header 6
\n",
-
- "####### Header 7\n",
- "# Header 7
\n",
-
- "Hello\n# Header 1\nGoodbye\n",
- "Header 1
\n\n\n
\n",
-
- "* List\n#Header\n* List\n",
- "Header
\n
\n",
-
- "* List\n * Nested list\n # Nested header\n",
- "Header
\n
\n",
-
- "# Header\n\n# Header\n",
- "\n
Nested header
Header
\n\nHeader
\n",
-
- "# Header 1\n\n# Header 1",
- "Header 1
\n\nHeader 1
\n",
-
- "# Header\n\n# Header 1\n\n# Header\n\n# Header",
- "Header
\n\nHeader 1
\n\nHeader
\n\nHeader
\n",
- }
- doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS)
-}
-
-func TestPrefixAutoHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) {
- var tests = []string{
- "# Header 1\n",
- "Header 1
\n",
-
- "# Header 1 \n",
- "Header 1
\n",
-
- "## Header 2\n",
- "Header 2
\n",
-
- "### Header 3\n",
- "Header 3
\n",
-
- "#### Header 4\n",
- "Header 4
\n",
-
- "##### Header 5\n",
- "Header 5
\n",
-
- "###### Header 6\n",
- "Header 6
\n",
-
- "####### Header 7\n",
- "# Header 7
\n",
-
- "Hello\n# Header 1\nGoodbye\n",
- "Header 1
\n\n\n
\n",
-
- "* List\n#Header\n* List\n",
- "Header
\n
\n",
-
- "* List\n * Nested list\n # Nested header\n",
- "Header
\n
\n",
-
- "# Header\n\n# Header\n",
- "\n
Nested header
Header
\n\nHeader
\n",
-
- "# Header 1\n\n# Header 1",
- "Header 1
\n\nHeader 1
\n",
-
- "# Header\n\n# Header 1\n\n# Header\n\n# Header",
- "Header
\n\nHeader 1
\n\nHeader
\n\nHeader
\n",
- }
-
- parameters := HtmlRendererParameters{
- HeaderIDPrefix: "PRE:",
- HeaderIDSuffix: ":POST",
- }
-
- doTestsBlockWithRunner(t, tests, EXTENSION_AUTO_HEADER_IDS, runnerWithRendererParameters(parameters))
-}
-
-func TestPrefixMultipleHeaderExtensions(t *testing.T) {
- var tests = []string{
- "# Header\n\n# Header {#header}\n\n# Header 1",
- "Header
\n\nHeader
\n\nHeader 1
\n",
- }
- doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS|EXTENSION_HEADER_IDS)
-}
-
-func TestUnderlineHeaders(t *testing.T) {
- var tests = []string{
- "Header 1\n========\n",
- "Header 1
\n",
-
- "Header 2\n--------\n",
- "Header 2
\n",
-
- "A\n=\n",
- "A
\n",
-
- "B\n-\n",
- "B
\n",
-
- "Paragraph\nHeader\n=\n",
- "Header
\n",
-
- "Header\n===\nParagraph\n",
- "Header
\n\nHeader
\n\nAnother header
\n",
-
- " Header\n======\n",
- "Header
\n",
-
- " Code\n========\n",
- "
\n\nCode\n
Header with inline
\n",
-
- "* List\n * Sublist\n Not a header\n ------\n",
- "\n
\n",
-
- "Paragraph\n\n\n\n\nHeader\n===\n",
- "\n
Header
\n",
-
- "Trailing space \n==== \n\n",
- "Trailing space
\n",
-
- "Trailing spaces\n==== \n\n",
- "Trailing spaces
\n",
-
- "Double underline\n=====\n=====\n",
- "Double underline
\n\nHeader 1
\n",
-
- "Header 2\n--------\n",
- "Header 2
\n",
-
- "A\n=\n",
- "A
\n",
-
- "B\n-\n",
- "B
\n",
-
- "Paragraph\nHeader\n=\n",
- "Header
\n",
-
- "Header\n===\nParagraph\n",
- "Header
\n\nHeader
\n\nAnother header
\n",
-
- " Header\n======\n",
- "Header
\n",
-
- "Header with *inline*\n=====\n",
- "Header with inline
\n",
-
- "Paragraph\n\n\n\n\nHeader\n===\n",
- "Header
\n",
-
- "Trailing space \n==== \n\n",
- "Trailing space
\n",
-
- "Trailing spaces\n==== \n\n",
- "Trailing spaces
\n",
-
- "Double underline\n=====\n=====\n",
- "Double underline
\n\nHeader
\n\nHeader
\n",
-
- "Header 1\n========\n\nHeader 1\n========\n",
- "Header 1
\n\nHeader 1
\n",
- }
- doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS)
-}
-
-func TestHorizontalRule(t *testing.T) {
- var tests = []string{
- "-\n",
- "
\n",
-
- "----\n",
- "
\n",
-
- "*\n",
- "
\n",
-
- "****\n",
- "
\n",
-
- "_\n",
- "
\n",
-
- "____\n",
- "
\n",
-
- "-*-\n",
- "
\n",
-
- "* * *\n",
- "
\n",
-
- "_ _ _\n",
- "
\n",
-
- "-----*\n",
- "
\n",
-
- "Hello\n***\n",
- "
\n",
-
- "---\n***\n___\n",
- "
\n\n
\n\n
\n",
- }
- doTestsBlock(t, tests, 0)
-}
-
-func TestUnorderedList(t *testing.T) {
- var tests = []string{
- "* Hello\n",
- "\n
\n",
-
- "* Yin\n* Yang\n",
- "\n
\n",
-
- "* Ting\n* Bong\n* Goo\n",
- "\n
\n",
-
- "* Yin\n\n* Yang\n",
- "\n
\n",
-
- "* Ting\n\n* Bong\n* Goo\n",
- "\n
\n",
-
- "+ Hello\n",
- "\n
\n",
-
- "+ Yin\n+ Yang\n",
- "\n
\n",
-
- "+ Ting\n+ Bong\n+ Goo\n",
- "\n
\n",
-
- "+ Yin\n\n+ Yang\n",
- "\n
\n",
-
- "+ Ting\n\n+ Bong\n+ Goo\n",
- "\n
\n",
-
- "- Hello\n",
- "\n
\n",
-
- "- Yin\n- Yang\n",
- "\n
\n",
-
- "- Ting\n- Bong\n- Goo\n",
- "\n
\n",
-
- "- Yin\n\n- Yang\n",
- "\n
\n",
-
- "- Ting\n\n- Bong\n- Goo\n",
- "\n
\n",
-
- "*Hello\n",
- "\n
\n",
-
- "* Hello \n Next line \n",
- "\n
\n",
-
- "Paragraph\n* No linebreak\n",
- "\n
\n",
-
- "* List\n\n1. Spacer Mixed listing\n",
- "\n
\n\n\n
\n",
-
- "* List\n * Nested list\n",
- "\n
\n",
-
- "* List\n\n * Nested list\n",
- "\n
\n
\n",
-
- "* List\n Second line\n\n + Nested\n",
- "\n
\n
\n",
-
- "* List\n + Nested\n\n Continued\n",
- "\n
\n
\n",
-
- "* List\n * shallow indent\n",
- "\n
\n\n\n
\n",
-
- "* List\n" +
- " * shallow indent\n" +
- " * part of second list\n" +
- " * still second\n" +
- " * almost there\n" +
- " * third level\n",
- "\n
\n" +
- "
\n",
-
- "* List\n extra indent, same paragraph\n",
- "\n" +
- "
\n" +
- "
\n
\n",
-
- "* List\n\n code block\n\n* List continues",
- "\n
\n",
-
- "* List\n\n code block with spaces\n",
- "code block\n
\n
\n",
-
- "* List\n\n * sublist\n\n normal text\n\n * another sublist\n",
- " code block with spaces\n
\n
\n",
-
- `* Foo
-
- bar
-
- qux
-`,
- `\n
\n\n\n
-
-`,
- }
- doTestsBlock(t, tests, 0)
-}
-
-func TestFencedCodeBlockWithinList(t *testing.T) {
- doTestsBlock(t, []string{
- "* Foo\n\n ```\n bar\n\n qux\n ```\n",
- `bar
-
-qux
-
-
-`,
- }, EXTENSION_FENCED_CODE)
-}
-
-func TestOrderedList(t *testing.T) {
- var tests = []string{
- "1. Hello\n",
- "bar
-
-qux
-
\n
\n",
-
- "1. Yin\n2. Yang\n",
- "\n
\n",
-
- "1. Ting\n2. Bong\n3. Goo\n",
- "\n
\n",
-
- "1. Yin\n\n2. Yang\n",
- "\n
\n",
-
- "1. Ting\n\n2. Bong\n3. Goo\n",
- "\n
\n",
-
- "1 Hello\n",
- "\n
\n",
-
- "1. Hello \n Next line \n",
- "\n
\n",
-
- "Paragraph\n1. No linebreak\n",
- "\n
\n",
-
- "1. List\n 1. Nested list\n",
- "\n
\n",
-
- "1. List\n\n 1. Nested list\n",
- "\n
\n
\n",
-
- "1. List\n Second line\n\n 1. Nested\n",
- "\n
\n
\n",
-
- "1. List\n 1. Nested\n\n Continued\n",
- "\n
\n
\n",
-
- "1. List\n 1. shallow indent\n",
- "\n
\n\n\n
\n",
-
- "1. List\n" +
- " 1. shallow indent\n" +
- " 2. part of second list\n" +
- " 3. still second\n" +
- " 4. almost there\n" +
- " 1. third level\n",
- "\n
\n" +
- "
\n",
-
- "1. List\n extra indent, same paragraph\n",
- "\n" +
- "
\n" +
- "
\n
\n",
-
- "1. List\n\n code block\n",
- "\n
\n",
-
- "1. List\n\n code block with spaces\n",
- "code block\n
\n
\n",
-
- "1. List\n\n* Spacer Mixed listing\n",
- " code block with spaces\n
\n
\n\n\n
\n",
-
- "1. List\n* Mixed listing\n",
- "\n
\n",
-
- "1. List\n * Mixted list\n",
- "\n
\n",
-
- "1. List\n * Mixed list\n",
- "\n
\n
\n",
-
- "* Start with unordered\n 1. Ordered\n",
- "\n
\n
\n",
-
- "* Start with unordered\n 1. Ordered\n",
- "\n
\n
\n",
-
- "1. numbers\n1. are ignored\n",
- "\n
\n
\n",
-
- `1. Foo
-
- bar
-
-
-
- qux
-`,
- `
-
-`,
- }
- doTestsBlock(t, tests, 0)
-}
-
-func TestDefinitionList(t *testing.T) {
- var tests = []string{
- "Term 1\n: Definition a\n",
- "bar
-
-
-
-qux
-
\n
\n",
-
- "Term 1\n: Definition a \n",
- "\n
\n",
-
- "Term 1\n: Definition a\n: Definition b\n",
- "\n
\n",
-
- "Term 1\n: Definition a\n\nTerm 2\n: Definition b\n",
- "\n" +
- "
\n",
-
- "Term 1\n: Definition a\n\nTerm 2\n: Definition b\n\nTerm 3\n: Definition c\n",
- "\n" +
- "
\n",
-
- "Term 1\n: Definition a\n: Definition b\n\nTerm 2\n: Definition c\n",
- "\n" +
- "
\n",
-
- "Term 1\n\n: Definition a\n\nTerm 2\n\n: Definition b\n",
- "\n" +
- "
\n",
-
- "Term 1\n\n: Definition a\n\n: Definition b\n\nTerm 2\n\n: Definition c\n",
- "\n" +
- "
\n",
-
- "Term 1\n: Definition a\nNext line\n",
- "\n
\n",
-
- "Term 1\n: Definition a\n Next line\n",
- "\n
\n",
-
- "Term 1\n: Definition a \n Next line \n",
- "\n
\n",
-
- "Term 1\n: Definition a\nNext line\n\nTerm 2\n: Definition b",
- "\n" +
- "
\n",
-
- "Term 1\n: Definition a\n",
- "\n
\n",
-
- "Term 1\n:Definition a\n",
- "\n" +
- "
\n" +
- "\n\n" +
- "
\n" +
- "\n\n" +
- "
\n" +
- "\n\n" +
- "
\n",
- }
- doTestsBlock(t, tests, EXTENSION_DEFINITION_LISTS)
-}
-
-func TestPreformattedHtml(t *testing.T) {
- var tests = []string{
- "\n",
- "\n",
-
- "\n
Paragraph\n
Paragraph
\n\nParagraph\n
Paragraph
\n\nParagraph\n
And here?
\n", - - "Paragraph\n\nParagraph
\n\nAnd here?
\n", - } - doTestsBlock(t, tests, 0) -} - -func TestPreformattedHtmlLax(t *testing.T) { - var tests = []string{ - "Paragraph\nParagraph
\n\nParagraph
\n\nParagraph
\n\nAnd here?
\n", - - "Paragraph\n\nParagraph
\n\nAnd here?
\n", - - "Paragraph\nParagraph
\n\nAnd here?
\n", - - "Paragraph\n\nParagraph
\n\nAnd here?
\n", - } - doTestsBlock(t, tests, EXTENSION_LAX_HTML_BLOCKS) -} - -func TestFencedCodeBlock(t *testing.T) { - var tests = []string{ - "``` go\nfunc foo() bool {\n\treturn true;\n}\n```\n", - "func foo() bool {\n\treturn true;\n}\n
\n",
-
- "``` go foo bar\nfunc foo() bool {\n\treturn true;\n}\n```\n",
- "func foo() bool {\n\treturn true;\n}\n
\n",
-
- "``` c\n/* special & char < > \" escaping */\n```\n",
- "/* special & char < > " escaping */\n
\n",
-
- "``` c\nno *inline* processing ~~of text~~\n```\n",
- "no *inline* processing ~~of text~~\n
\n",
-
- "```\nNo language\n```\n",
- "No language\n
\n",
-
- "``` {ocaml}\nlanguage in braces\n```\n",
- "language in braces\n
\n",
-
- "``` {ocaml} \nwith extra whitespace\n```\n",
- "with extra whitespace\n
\n",
-
- "```{ ocaml }\nwith extra whitespace\n```\n",
- "with extra whitespace\n
\n",
-
- "~ ~~ java\nWith whitespace\n~~~\n",
- "~ ~~ java\nWith whitespace\n~~~
\n", - - "~~\nonly two\n~~\n", - "~~\nonly two\n~~
\n", - - "```` python\nextra\n````\n", - "extra\n
\n",
-
- "~~~ perl\nthree to start, four to end\n~~~~\n",
- "~~~ perl\nthree to start, four to end\n~~~~
\n", - - "~~~~ perl\nfour to start, three to end\n~~~\n", - "~~~~ perl\nfour to start, three to end\n~~~
\n", - - "~~~ bash\ntildes\n~~~\n", - "tildes\n
\n",
-
- "``` lisp\nno ending\n",
- "``` lisp\nno ending
\n", - - "~~~ lisp\nend with language\n~~~ lisp\n", - "end with language\n
\n\nlisp
\n", - - "```\nmismatched begin and end\n~~~\n", - "```\nmismatched begin and end\n~~~
\n", - - "~~~\nmismatched begin and end\n```\n", - "~~~\nmismatched begin and end\n```
\n", - - " ``` oz\nleading spaces\n```\n", - "leading spaces\n
\n",
-
- " ``` oz\nleading spaces\n ```\n",
- "leading spaces\n
\n",
-
- " ``` oz\nleading spaces\n ```\n",
- "leading spaces\n
\n",
-
- "``` oz\nleading spaces\n ```\n",
- "leading spaces\n
\n",
-
- " ``` oz\nleading spaces\n ```\n",
- "``` oz\n
\n\nleading spaces\n ```
\n", - - "Bla bla\n\n``` oz\ncode blocks breakup paragraphs\n```\n\nBla Bla\n", - "Bla bla
\n\ncode blocks breakup paragraphs\n
\n\nBla Bla
\n", - - "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nAnd some text after a fenced code block", - "Some text before a fenced code block
\n\ncode blocks breakup paragraphs\n
\n\nAnd some text after a fenced code block
\n", - - "`", - "`
\n", - - "Bla bla\n\n``` oz\ncode blocks breakup paragraphs\n```\n\nBla Bla\n\n``` oz\nmultiple code blocks work okay\n```\n\nBla Bla\n", - "Bla bla
\n\ncode blocks breakup paragraphs\n
\n\nBla Bla
\n\nmultiple code blocks work okay\n
\n\nBla Bla
\n", - - "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nSome text in between\n``` oz\nmultiple code blocks work okay\n```\nAnd some text after a fenced code block", - "Some text before a fenced code block
\n\ncode blocks breakup paragraphs\n
\n\nSome text in between
\n\nmultiple code blocks work okay\n
\n\nAnd some text after a fenced code block
\n", - - "```\n[]:()\n```\n", - "[]:()\n
\n",
-
- "```\n[]:()\n[]:)\n[]:(\n[]:x\n[]:testing\n[:testing\n\n[]:\nlinebreak\n[]()\n\n[]:\n[]()\n```",
- "[]:()\n[]:)\n[]:(\n[]:x\n[]:testing\n[:testing\n\n[]:\nlinebreak\n[]()\n\n[]:\n[]()\n
\n",
-
- "- test\n\n```\n codeblock\n ```\ntest\n",
- "test
\n\ncodeblock\n
test
\n", - - "- ```\n codeblock\n ```\n\n- test\n", - "codeblock\n
test
codeblock\n
test
\n\ncodeblock\n
test
test
\n\nfunc foo() bool {\n\treturn true;\n}\n
test
--`, - // ------------------------------------------- - cat("> foo", - "> ", - "> ```go", - "package moo", - "```", - "> ", - "> goo.", - ""), - `-package moo - -
--`, - // ------------------------------------------- - cat("> foo", - "> ", - "> quote", - "continues", - "```", - ""), - `foo
- -- -package moo -
goo.
-
--`, - // ------------------------------------------- - cat("> foo", - "> ", - "> ```go", - "package moo", - "```", - "> ", - "> goo.", - "> ", - "> ```go", - "package zoo", - "```", - "> ", - "> woo.", - ""), - `foo
- -quote -continues -` + "```" + `
-
--`, - } - - // These 2 alternative forms of blockquoted fenced code blocks should produce same output. - forms := [2]string{ - cat("> plain quoted text", - "> ```fenced", - "code", - " with leading single space correctly preserved", - "okay", - "```", - "> rest of quoted text"), - cat("> plain quoted text", - "> ```fenced", - "> code", - "> with leading single space correctly preserved", - "> okay", - "> ```", - "> rest of quoted text"), - } - want := `foo
- -- -package moo -
goo.
- -- -package zoo -
woo.
-
--` - tests = append(tests, forms[0], want) - tests = append(tests, forms[1], want) - - doTestsBlock(t, tests, EXTENSION_FENCED_CODE) -} - -func TestTable(t *testing.T) { - var tests = []string{ - "a | b\n---|---\nc | d\n", - "plain quoted text
- -- -code - with leading single space correctly preserved -okay -
rest of quoted text
-
a | \nb | \n
---|---|
c | \nd | \n
a | b\n---|--\nc | d
\n", - - "|a|b|c|d|\n|----|----|----|---|\n|e|f|g|h|\n", - "a | \nb | \nc | \nd | \n
---|---|---|---|
e | \nf | \ng | \nh | \n
a | \nb | \nc | \nd | \n
---|---|---|---|
e | \nf | \ng | \nh | \n
a | \nb | \nc | \n
---|---|---|
d | \ne | \nf | \n
g | \nh | \n\n |
i | \nj | \nk | \n
n | \no | \np | \n
a | \nb | \nc | \n
---|---|---|
d | \ne | \nf | \n
a | \nb | \n" + - "c | \nd | \n
---|---|---|---|
e | \nf | \n" + - "g | \nh | \n
a | \nb | \nc | \n
---|
a | \nb | \nc | \nd | \ne | \n
---|---|---|---|---|
f | \ng | \nh | \ni | \nj | \n
a | \nb|c | \nd | \n
---|---|---|
f | \ng|h | \ni | \n
Yin
Yang
Ting
Bong
Goo
Yin
Yang
Ting
Bong
Goo
Yin
Yang
Ting
Bong
Goo
*Hello
\n", - - "* Hello \n", - "Paragraph
\n\nParagraph
\n\nList
\n\nList\nSecond line
\n\nList
\n\nContinued
List
\n\ncode block\n
List
\n\n code block with spaces\n
List
\n\nnormal text
\n\nYin
Yang
Ting
Bong
Goo
1 Hello
\n", - - "1.Hello\n", - "1.Hello
\n", - - "1. Hello \n", - "Paragraph
\n\nParagraph
\n\nList
\n\nList\nSecond line
\n\nList
\n\nContinued
List
\n\ncode block\n
List
\n\n code block with spaces\n
func foo() bool {\n\treturn true;\n}\n
\n",
-
- "``` go foo bar\nfunc foo() bool {\n\treturn true;\n}\n```\n",
- "func foo() bool {\n\treturn true;\n}\n
\n",
-
- "``` c\n/* special & char < > \" escaping */\n```\n",
- "/* special & char < > " escaping */\n
\n",
-
- "``` c\nno *inline* processing ~~of text~~\n```\n",
- "no *inline* processing ~~of text~~\n
\n",
-
- "```\nNo language\n```\n",
- "No language\n
\n",
-
- "``` {ocaml}\nlanguage in braces\n```\n",
- "language in braces\n
\n",
-
- "``` {ocaml} \nwith extra whitespace\n```\n",
- "with extra whitespace\n
\n",
-
- "```{ ocaml }\nwith extra whitespace\n```\n",
- "with extra whitespace\n
\n",
-
- "~ ~~ java\nWith whitespace\n~~~\n",
- "~ ~~ java\nWith whitespace\n~~~
\n", - - "~~\nonly two\n~~\n", - "~~\nonly two\n~~
\n", - - "```` python\nextra\n````\n", - "extra\n
\n",
-
- "~~~ perl\nthree to start, four to end\n~~~~\n",
- "~~~ perl\nthree to start, four to end\n~~~~
\n", - - "~~~~ perl\nfour to start, three to end\n~~~\n", - "~~~~ perl\nfour to start, three to end\n~~~
\n", - - "~~~ bash\ntildes\n~~~\n", - "tildes\n
\n",
-
- "``` lisp\nno ending\n",
- "``` lisp\nno ending
\n", - - "~~~ lisp\nend with language\n~~~ lisp\n", - "end with language\n
\n\nlisp
\n", - - "```\nmismatched begin and end\n~~~\n", - "```\nmismatched begin and end\n~~~
\n", - - "~~~\nmismatched begin and end\n```\n", - "~~~\nmismatched begin and end\n```
\n", - - " ``` oz\nleading spaces\n```\n", - "leading spaces\n
\n",
-
- " ``` oz\nleading spaces\n ```\n",
- "leading spaces\n
\n",
-
- " ``` oz\nleading spaces\n ```\n",
- "leading spaces\n
\n",
-
- "``` oz\nleading spaces\n ```\n",
- "leading spaces\n
\n",
-
- " ``` oz\nleading spaces\n ```\n",
- "``` oz\n
\n\nleading spaces
\n\n```\n
\n",
- }
- doTestsBlock(t, tests, EXTENSION_FENCED_CODE|EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK)
-}
-
-func TestListWithFencedCodeBlock(t *testing.T) {
- var tests = []string{
- "1. one\n\n ```\n code\n ```\n\n2. two\n",
- "one
\n\ncode\n
two
one
\n\n- code\n
two
one
\n\n\ncode\n
two
one
\n\n```
\n\ntwo
Some text
\n\n\n", - - "Some text\n\n\n", - "Some text
\n\n\n", - - "Some text\n\n\n", - "Some text
\n\n\n", - } - doTestsBlock(t, tests, 0) -} - -func TestCDATA(t *testing.T) { - var tests = []string{ - "Some text\n\n\n", - "Some text
\n\n\n", - - "CDATA ]]\n\n\n", - "CDATA ]]
\n\n\n", - - "CDATA >\n\n]]>\n", - "CDATA >
\n\n]]>\n", - - "Lots of text\n\nLots of text
\n\n<![CDATA[foo]]>\n
\n",
-
- "\n",
- "\n",
-
- ` def func():
-> pass
-]]>
-`,
- ` def func():
-> pass
-]]>
-`,
- }, EXTENSION_FENCED_CODE)
-}
-
-func TestIsFenceLine(t *testing.T) {
- tests := []struct {
- data []byte
- infoRequested bool
- newlineOptional bool
- wantEnd int
- wantMarker string
- wantInfo string
- }{
- {
- data: []byte("```"),
- wantEnd: 0,
- },
- {
- data: []byte("```\nstuff here\n"),
- wantEnd: 4,
- wantMarker: "```",
- },
- {
- data: []byte("```\nstuff here\n"),
- infoRequested: true,
- wantEnd: 4,
- wantMarker: "```",
- },
- {
- data: []byte("stuff here\n```\n"),
- wantEnd: 0,
- },
- {
- data: []byte("```"),
- newlineOptional: true,
- wantEnd: 3,
- wantMarker: "```",
- },
- {
- data: []byte("```"),
- infoRequested: true,
- newlineOptional: true,
- wantEnd: 3,
- wantMarker: "```",
- },
- {
- data: []byte("``` go"),
- infoRequested: true,
- newlineOptional: true,
- wantEnd: 6,
- wantMarker: "```",
- wantInfo: "go",
- },
- {
- data: []byte("``` go foo bar"),
- infoRequested: true,
- newlineOptional: true,
- wantEnd: 14,
- wantMarker: "```",
- wantInfo: "go foo bar",
- },
- {
- data: []byte("``` go foo bar "),
- infoRequested: true,
- newlineOptional: true,
- wantEnd: 16,
- wantMarker: "```",
- wantInfo: "go foo bar",
- },
- }
-
- for _, test := range tests {
- var info *string
- if test.infoRequested {
- info = new(string)
- }
- end, marker := isFenceLine(test.data, info, "```", test.newlineOptional)
- if got, want := end, test.wantEnd; got != want {
- t.Errorf("got end %v, want %v", got, want)
- }
- if got, want := marker, test.wantMarker; got != want {
- t.Errorf("got marker %q, want %q", got, want)
- }
- if test.infoRequested {
- if got, want := *info, test.wantInfo; got != want {
- t.Errorf("got info %q, want %q", got, want)
- }
- }
- }
-}
-
-func TestJoinLines(t *testing.T) {
- input := `# 标题
-
-第一
-行文字。
-
-第
-二
-行文字。
-`
- result := `第一行文字。
- -第二行文字。
-` - opt := Options{Extensions: commonExtensions | EXTENSION_JOIN_LINES} - renderer := HtmlRenderer(commonHtmlFlags, "", "") - output := MarkdownOptions([]byte(input), renderer, opt) - - if string(output) != result { - t.Error("output dose not match.") - } -} - -func TestSanitizedAnchorName(t *testing.T) { - tests := []struct { - text string - want string - }{ - { - text: "This is a header", - want: "this-is-a-header", - }, - { - text: "This is also a header", - want: "this-is-also-a-header", - }, - { - text: "main.go", - want: "main-go", - }, - { - text: "Article 123", - want: "article-123", - }, - { - text: "<- Let's try this, shall we?", - want: "let-s-try-this-shall-we", - }, - { - text: " ", - want: "", - }, - { - text: "Hello, 世界", - want: "hello-世界", - }, - } - for _, test := range tests { - if got := SanitizedAnchorName(test.text); got != test.want { - t.Errorf("SanitizedAnchorName(%q):\ngot %q\nwant %q", test.text, got, test.want) - } - } -} diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go deleted file mode 100644 index 9656c42a..00000000 --- a/vendor/github.com/russross/blackfriday/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Package blackfriday is a Markdown processor. -// -// It translates plain text with simple formatting rules into HTML or LaTeX. -// -// Sanitized Anchor Names -// -// Blackfriday includes an algorithm for creating sanitized anchor names -// corresponding to a given input text. This algorithm is used to create -// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The -// algorithm is specified below, so that other packages can create -// compatible anchor names and links to those anchors. -// -// The algorithm iterates over the input text, interpreted as UTF-8, -// one Unicode code point (rune) at a time. All runes that are letters (category L) -// or numbers (category N) are considered valid characters. They are mapped to -// lower case, and included in the output. All other runes are considered -// invalid characters. Invalid characters that preceed the first valid character, -// as well as invalid character that follow the last valid character -// are dropped completely. All other sequences of invalid characters -// between two valid characters are replaced with a single dash character '-'. -// -// SanitizedAnchorName exposes this functionality, and can be used to -// create compatible links to the anchor names generated by blackfriday. -// This algorithm is also implemented in a small standalone package at -// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients -// that want a small package and don't need full functionality of blackfriday. -package blackfriday - -// NOTE: Keep Sanitized Anchor Name algorithm in sync with package -// github.com/shurcooL/sanitized_anchor_name. -// Otherwise, users of sanitized_anchor_name will get anchor names -// that are incompatible with those generated by blackfriday. 
diff --git a/vendor/github.com/russross/blackfriday/go.mod b/vendor/github.com/russross/blackfriday/go.mod deleted file mode 100644 index b05561a0..00000000 --- a/vendor/github.com/russross/blackfriday/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/russross/blackfriday diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go deleted file mode 100644 index e0a6c69c..00000000 --- a/vendor/github.com/russross/blackfriday/html.go +++ /dev/null @@ -1,938 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross