efp.go 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671
  1. // Package efp (Excel Formula Parser) tokenize an Excel formula using an
  2. // implementation of E. W. Bachtal's algorithm, found here:
  3. // https://ewbi.blogs.com/develops/2004/12/excel_formula_p.html
  4. //
  5. // Go language version by Ri Xu: https://xuri.me
  6. package efp
  7. import (
  8. "regexp"
  9. "strconv"
  10. "strings"
  11. )
// QuoteDouble, QuoteSingle and the other constants are token definitions.
const (
	// Character constants recognized by the scanner.
	QuoteDouble  = "\""
	QuoteSingle  = "'"
	BracketClose = "]"
	BracketOpen  = "["
	BraceOpen    = "{"
	BraceClose   = "}"
	ParenOpen    = "("
	ParenClose   = ")"
	Semicolon    = ";"
	Whitespace   = " "
	Comma        = ","
	ErrorStart   = "#"
	// Operator character sets: sign characters valid inside scientific
	// notation, infix operators, and the postfix percent operator.
	OperatorsSN      = "+-"
	OperatorsInfix   = "+-*/^&=><"
	OperatorsPostfix = "%"
	// Token types assigned to Token.TType.
	TokenTypeNoop            = "Noop"
	TokenTypeOperand         = "Operand"
	TokenTypeFunction        = "Function"
	TokenTypeSubexpression   = "Subexpression"
	TokenTypeArgument        = "Argument"
	TokenTypeOperatorPrefix  = "OperatorPrefix"
	TokenTypeOperatorInfix   = "OperatorInfix"
	TokenTypeOperatorPostfix = "OperatorPostfix"
	TokenTypeWhitespace      = "Whitespace"
	TokenTypeUnknown         = "Unknown"
	// Token subtypes assigned to Token.TSubType.
	TokenSubTypeStart         = "Start"
	TokenSubTypeStop          = "Stop"
	TokenSubTypeText          = "Text"
	TokenSubTypeNumber        = "Number"
	TokenSubTypeLogical       = "Logical"
	TokenSubTypeError         = "Error"
	TokenSubTypeRange         = "Range"
	TokenSubTypeMath          = "Math"
	TokenSubTypeConcatenation = "Concatenation"
	TokenSubTypeIntersection  = "Intersection"
	TokenSubTypeUnion         = "Union"
)
// Token encapsulate a formula token.
type Token struct {
	TValue   string // literal text of the token
	TType    string // one of the TokenType* constants
	TSubType string // one of the TokenSubType* constants, or "" when unset
}
// Tokens directly maps the ordered list of tokens.
// Attributes:
//
//	Index - Current position in the list (-1 before the first element)
//	Items - Ordered list
type Tokens struct {
	Index int
	Items []Token
}
// Parser inheritable container. TokenStack directly maps a LIFO stack of
// tokens.
type Parser struct {
	Formula    string // formula text being scanned
	Tokens     Tokens // output token stream
	TokenStack Tokens // LIFO stack of open functions/subexpressions/arrays
	Offset     int    // current rune position in Formula
	Token      string // token text accumulated so far
	InString   bool   // inside a double-quoted string literal
	InPath     bool   // inside a single-quoted path/link section
	InRange    bool   // inside a bracketed range/workbook-name section
	InError    bool   // inside an error literal started by '#'
}
  83. // fToken provides function to encapsulate a formula token.
  84. func fToken(value, tokenType, subType string) Token {
  85. return Token{
  86. TValue: value,
  87. TType: tokenType,
  88. TSubType: subType,
  89. }
  90. }
  91. // fTokens provides function to handle an ordered list of tokens.
  92. func fTokens() Tokens {
  93. return Tokens{
  94. Index: -1,
  95. }
  96. }
  97. // add provides function to add a token to the end of the list.
  98. func (tk *Tokens) add(value, tokenType, subType string) Token {
  99. token := fToken(value, tokenType, subType)
  100. tk.addRef(token)
  101. return token
  102. }
// addRef appends an already-built token to the end of the list.
func (tk *Tokens) addRef(token Token) {
	tk.Items = append(tk.Items, token)
}
// reset rewinds the cursor to -1, just before the first element.
func (tk *Tokens) reset() {
	tk.Index = -1
}
// BOF reports whether the cursor is at the beginning of the list — on the
// first element or before it.
func (tk *Tokens) BOF() bool {
	return tk.Index <= 0
}
// EOF reports whether the cursor is on (or past) the last element; an empty
// list is always at EOF.
func (tk *Tokens) EOF() bool {
	return tk.Index >= (len(tk.Items) - 1)
}
  119. // moveNext provides function to move the index along one.
  120. func (tk *Tokens) moveNext() bool {
  121. if tk.EOF() {
  122. return false
  123. }
  124. tk.Index++
  125. return true
  126. }
  127. // current return the current token.
  128. func (tk *Tokens) current() *Token {
  129. if tk.Index == -1 {
  130. return nil
  131. }
  132. return &tk.Items[tk.Index]
  133. }
  134. // next return the next token (leave the index unchanged).
  135. func (tk *Tokens) next() *Token {
  136. if tk.EOF() {
  137. return nil
  138. }
  139. return &tk.Items[tk.Index+1]
  140. }
  141. // previous return the previous token (leave the index unchanged).
  142. func (tk *Tokens) previous() *Token {
  143. if tk.Index < 1 {
  144. return nil
  145. }
  146. return &tk.Items[tk.Index-1]
  147. }
// push places a token on the top of the stack.
func (tk *Tokens) push(token Token) {
	tk.Items = append(tk.Items, token)
}
// pop removes the top token from the stack. Note the return value is NOT the
// popped token: it is a fresh token carrying the popped token's type with
// subtype Stop and an empty value, used by the scanner to emit the matching
// close marker for a function, subexpression or array. On an empty stack
// nothing is removed and a Function/Stop token is returned.
func (tk *Tokens) pop() Token {
	if len(tk.Items) == 0 {
		return Token{
			TType:    TokenTypeFunction,
			TSubType: TokenSubTypeStop,
		}
	}
	t := tk.Items[len(tk.Items)-1]
	tk.Items = tk.Items[:len(tk.Items)-1]
	return fToken("", t.TType, TokenSubTypeStop)
}
  164. // token provides function to non-destructively return the top item on the
  165. // stack.
  166. func (tk *Tokens) token() *Token {
  167. if len(tk.Items) > 0 {
  168. return &tk.Items[len(tk.Items)-1]
  169. }
  170. return nil
  171. }
  172. // value return the top token's value.
  173. func (tk *Tokens) value() string {
  174. if tk.token() == nil {
  175. return ""
  176. }
  177. return tk.token().TValue
  178. }
  179. // tp return the top token's type.
  180. func (tk *Tokens) tp() string {
  181. if tk.token() == nil {
  182. return ""
  183. }
  184. return tk.token().TType
  185. }
  186. // subtype return the top token's subtype.
  187. func (tk *Tokens) subtype() string {
  188. if tk.token() == nil {
  189. return ""
  190. }
  191. return tk.token().TSubType
  192. }
// ExcelParser returns a zero-valued Parser ready to parse an Excel formula
// into a stream of tokens.
func ExcelParser() Parser {
	return Parser{}
}
  198. // getTokens return a token stream (list).
  199. func (ps *Parser) getTokens() Tokens {
  200. ps.Formula = strings.TrimSpace(ps.Formula)
  201. f := []rune(ps.Formula)
  202. if len(f) > 0 {
  203. if string(f[0]) != "=" {
  204. ps.Formula = "=" + ps.Formula
  205. }
  206. }
  207. // state-dependent character evaluation (order is important)
  208. for !ps.EOF() {
  209. // double-quoted strings
  210. // embeds are doubled
  211. // end marks token
  212. if ps.InString {
  213. if ps.currentChar() == QuoteDouble {
  214. if ps.nextChar() == QuoteDouble {
  215. ps.Token += QuoteDouble
  216. ps.Offset++
  217. } else {
  218. ps.InString = false
  219. ps.Tokens.add(ps.Token, TokenTypeOperand, TokenSubTypeText)
  220. ps.Token = ""
  221. }
  222. } else {
  223. ps.Token += ps.currentChar()
  224. }
  225. ps.Offset++
  226. continue
  227. }
  228. // single-quoted strings (links)
  229. // embeds are double
  230. // end does not mark a token
  231. if ps.InPath {
  232. if ps.currentChar() == QuoteSingle {
  233. if ps.nextChar() == QuoteSingle {
  234. ps.Token += QuoteSingle
  235. ps.Offset++
  236. } else {
  237. ps.InPath = false
  238. }
  239. } else {
  240. ps.Token += ps.currentChar()
  241. }
  242. ps.Offset++
  243. continue
  244. }
  245. // bracketed strings (range offset or linked workbook name)
  246. // no embeds (changed to "()" by Excel)
  247. // end does not mark a token
  248. if ps.InRange {
  249. if ps.currentChar() == BracketClose {
  250. ps.InRange = false
  251. }
  252. ps.Token += ps.currentChar()
  253. ps.Offset++
  254. continue
  255. }
  256. // error values
  257. // end marks a token, determined from absolute list of values
  258. if ps.InError {
  259. ps.Token += ps.currentChar()
  260. ps.Offset++
  261. if inStrSlice([]string{",#NULL!,", ",#DIV/0!,", ",#VALUE!,", ",#REF!,", ",#NAME?,", ",#NUM!,", ",#N/A,"}, Comma+ps.Token+Comma) != -1 {
  262. ps.InError = false
  263. ps.Tokens.add(ps.Token, TokenTypeOperand, TokenSubTypeError)
  264. ps.Token = ""
  265. }
  266. continue
  267. }
  268. // scientific notation check
  269. if strings.ContainsAny(ps.currentChar(), OperatorsSN) && len(ps.Token) > 1 {
  270. r, _ := regexp.Compile(`^[1-9]{1}(\.[0-9]+)?E{1}$`)
  271. if r.MatchString(ps.Token) {
  272. ps.Token += ps.currentChar()
  273. ps.Offset++
  274. continue
  275. }
  276. }
  277. // independent character evaluation (order not important)
  278. // establish state-dependent character evaluations
  279. if ps.currentChar() == QuoteDouble {
  280. if len(ps.Token) > 0 {
  281. // not expected
  282. ps.Tokens.add(ps.Token, TokenTypeUnknown, "")
  283. ps.Token = ""
  284. }
  285. ps.InString = true
  286. ps.Offset++
  287. continue
  288. }
  289. if ps.currentChar() == QuoteSingle {
  290. if len(ps.Token) > 0 {
  291. // not expected
  292. ps.Tokens.add(ps.Token, TokenTypeUnknown, "")
  293. ps.Token = ""
  294. }
  295. ps.InPath = true
  296. ps.Offset++
  297. continue
  298. }
  299. if ps.currentChar() == BracketOpen {
  300. ps.InRange = true
  301. ps.Token += ps.currentChar()
  302. ps.Offset++
  303. continue
  304. }
  305. if ps.currentChar() == ErrorStart {
  306. if len(ps.Token) > 0 {
  307. // not expected
  308. ps.Tokens.add(ps.Token, TokenTypeUnknown, "")
  309. ps.Token = ""
  310. }
  311. ps.InError = true
  312. ps.Token += ps.currentChar()
  313. ps.Offset++
  314. continue
  315. }
  316. // mark start and end of arrays and array rows
  317. if ps.currentChar() == BraceOpen {
  318. if len(ps.Token) > 0 {
  319. // not expected
  320. ps.Tokens.add(ps.Token, TokenTypeUnknown, "")
  321. ps.Token = ""
  322. }
  323. ps.TokenStack.push(ps.Tokens.add("ARRAY", TokenTypeFunction, TokenSubTypeStart))
  324. ps.TokenStack.push(ps.Tokens.add("ARRAYROW", TokenTypeFunction, TokenSubTypeStart))
  325. ps.Offset++
  326. continue
  327. }
  328. if ps.currentChar() == Semicolon {
  329. if len(ps.Token) > 0 {
  330. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  331. ps.Token = ""
  332. }
  333. ps.Tokens.addRef(ps.TokenStack.pop())
  334. ps.Tokens.add(Comma, TokenTypeArgument, "")
  335. ps.TokenStack.push(ps.Tokens.add("ARRAYROW", TokenTypeFunction, TokenSubTypeStart))
  336. ps.Offset++
  337. continue
  338. }
  339. if ps.currentChar() == BraceClose {
  340. if len(ps.Token) > 0 {
  341. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  342. ps.Token = ""
  343. }
  344. ps.Tokens.addRef(ps.TokenStack.pop())
  345. ps.Tokens.addRef(ps.TokenStack.pop())
  346. ps.Offset++
  347. continue
  348. }
  349. // trim white-space
  350. if ps.currentChar() == Whitespace {
  351. if len(ps.Token) > 0 {
  352. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  353. ps.Token = ""
  354. }
  355. ps.Tokens.add("", TokenTypeWhitespace, "")
  356. ps.Offset++
  357. for (ps.currentChar() == Whitespace) && (!ps.EOF()) {
  358. ps.Offset++
  359. }
  360. continue
  361. }
  362. // multi-character comparators
  363. if inStrSlice([]string{",>=,", ",<=,", ",<>,"}, Comma+ps.doubleChar()+Comma) != -1 {
  364. if len(ps.Token) > 0 {
  365. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  366. ps.Token = ""
  367. }
  368. ps.Tokens.add(ps.doubleChar(), TokenTypeOperatorInfix, TokenSubTypeLogical)
  369. ps.Offset += 2
  370. continue
  371. }
  372. // standard infix operators
  373. if strings.ContainsAny(OperatorsInfix, ps.currentChar()) {
  374. if len(ps.Token) > 0 {
  375. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  376. ps.Token = ""
  377. }
  378. ps.Tokens.add(ps.currentChar(), TokenTypeOperatorInfix, "")
  379. ps.Offset++
  380. continue
  381. }
  382. // standard postfix operators
  383. if ps.currentChar() == OperatorsPostfix {
  384. if len(ps.Token) > 0 {
  385. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  386. ps.Token = ""
  387. }
  388. ps.Tokens.add(ps.currentChar(), TokenTypeOperatorPostfix, "")
  389. ps.Offset++
  390. continue
  391. }
  392. // start subexpression or function
  393. if ps.currentChar() == ParenOpen {
  394. if len(ps.Token) > 0 {
  395. ps.TokenStack.push(ps.Tokens.add(ps.Token, TokenTypeFunction, TokenSubTypeStart))
  396. ps.Token = ""
  397. } else {
  398. ps.TokenStack.push(ps.Tokens.add("", TokenTypeSubexpression, TokenSubTypeStart))
  399. }
  400. ps.Offset++
  401. continue
  402. }
  403. // function, subexpression, array parameters
  404. if ps.currentChar() == Comma {
  405. if len(ps.Token) > 0 {
  406. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  407. ps.Token = ""
  408. }
  409. if ps.TokenStack.tp() != TokenTypeFunction {
  410. ps.Tokens.add(ps.currentChar(), TokenTypeOperatorInfix, TokenSubTypeUnion)
  411. } else {
  412. ps.Tokens.add(ps.currentChar(), TokenTypeArgument, "")
  413. }
  414. ps.Offset++
  415. continue
  416. }
  417. // stop subexpression
  418. if ps.currentChar() == ParenClose {
  419. if len(ps.Token) > 0 {
  420. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  421. ps.Token = ""
  422. }
  423. ps.Tokens.addRef(ps.TokenStack.pop())
  424. ps.Offset++
  425. continue
  426. }
  427. // token accumulation
  428. ps.Token += ps.currentChar()
  429. ps.Offset++
  430. }
  431. // dump remaining accumulation
  432. if len(ps.Token) > 0 {
  433. ps.Tokens.add(ps.Token, TokenTypeOperand, "")
  434. }
  435. // move all tokens to a new collection, excluding all unnecessary white-space tokens
  436. tokens2 := fTokens()
  437. for ps.Tokens.moveNext() {
  438. token := ps.Tokens.current()
  439. if token.TType == TokenTypeWhitespace {
  440. if ps.Tokens.BOF() || ps.Tokens.EOF() {
  441. } else if !(((ps.Tokens.previous().TType == TokenTypeFunction) && (ps.Tokens.previous().TSubType == TokenSubTypeStop)) || ((ps.Tokens.previous().TType == TokenTypeSubexpression) && (ps.Tokens.previous().TSubType == TokenSubTypeStop)) || (ps.Tokens.previous().TType == TokenTypeOperand)) {
  442. } else if !(((ps.Tokens.next().TType == TokenTypeFunction) && (ps.Tokens.next().TSubType == TokenSubTypeStart)) || ((ps.Tokens.next().TType == TokenTypeSubexpression) && (ps.Tokens.next().TSubType == TokenSubTypeStart)) || (ps.Tokens.next().TType == TokenTypeOperand)) {
  443. } else {
  444. tokens2.add(token.TValue, TokenTypeOperatorInfix, TokenSubTypeIntersection)
  445. }
  446. continue
  447. }
  448. tokens2.addRef(Token{
  449. TValue: token.TValue,
  450. TType: token.TType,
  451. TSubType: token.TSubType,
  452. })
  453. }
  454. // switch infix "-" operator to prefix when appropriate, switch infix "+"
  455. // operator to noop when appropriate, identify operand and infix-operator
  456. // subtypes, pull "@" from in front of function names
  457. for tokens2.moveNext() {
  458. token := tokens2.current()
  459. if (token.TType == TokenTypeOperatorInfix) && (token.TValue == "-") {
  460. if tokens2.BOF() {
  461. token.TType = TokenTypeOperatorPrefix
  462. } else if ((tokens2.previous().TType == TokenTypeFunction) && (tokens2.previous().TSubType == TokenSubTypeStop)) || ((tokens2.previous().TType == TokenTypeSubexpression) && (tokens2.previous().TSubType == TokenSubTypeStop)) || (tokens2.previous().TType == TokenTypeOperatorPostfix) || (tokens2.previous().TType == TokenTypeOperand) {
  463. token.TSubType = TokenSubTypeMath
  464. } else {
  465. token.TType = TokenTypeOperatorPrefix
  466. }
  467. continue
  468. }
  469. if (token.TType == TokenTypeOperatorInfix) && (token.TValue == "+") {
  470. if tokens2.BOF() {
  471. token.TType = TokenTypeNoop
  472. } else if (tokens2.previous().TType == TokenTypeFunction) && (tokens2.previous().TSubType == TokenSubTypeStop) || ((tokens2.previous().TType == TokenTypeSubexpression) && (tokens2.previous().TSubType == TokenSubTypeStop) || (tokens2.previous().TType == TokenTypeOperatorPostfix) || (tokens2.previous().TType == TokenTypeOperand)) {
  473. token.TSubType = TokenSubTypeMath
  474. } else {
  475. token.TType = TokenTypeNoop
  476. }
  477. continue
  478. }
  479. if (token.TType == TokenTypeOperatorInfix) && (len(token.TSubType) == 0) {
  480. if strings.ContainsAny(token.TValue[0:1], "<>=") {
  481. token.TSubType = TokenSubTypeLogical
  482. } else if token.TValue == "&" {
  483. token.TSubType = TokenSubTypeConcatenation
  484. } else {
  485. token.TSubType = TokenSubTypeMath
  486. }
  487. continue
  488. }
  489. if (token.TType == TokenTypeOperand) && (len(token.TSubType) == 0) {
  490. if _, err := strconv.ParseFloat(token.TValue, 64); err != nil {
  491. if (token.TValue == "TRUE") || (token.TValue == "FALSE") {
  492. token.TSubType = TokenSubTypeLogical
  493. } else {
  494. token.TSubType = TokenSubTypeRange
  495. }
  496. } else {
  497. token.TSubType = TokenSubTypeNumber
  498. }
  499. continue
  500. }
  501. if token.TType == TokenTypeFunction {
  502. if (len(token.TValue) > 0) && token.TValue[0:1] == "@" {
  503. token.TValue = token.TValue[1:]
  504. }
  505. continue
  506. }
  507. }
  508. tokens2.reset()
  509. // move all tokens to a new collection, excluding all no-ops
  510. tokens := fTokens()
  511. for tokens2.moveNext() {
  512. if tokens2.current().TType != TokenTypeNoop {
  513. tokens.addRef(Token{
  514. TValue: tokens2.current().TValue,
  515. TType: tokens2.current().TType,
  516. TSubType: tokens2.current().TSubType,
  517. })
  518. }
  519. }
  520. tokens.reset()
  521. return tokens
  522. }
  523. // doubleChar provides function to get two characters after the current
  524. // position.
  525. func (ps *Parser) doubleChar() string {
  526. if len([]rune(ps.Formula)) >= ps.Offset+2 {
  527. return string([]rune(ps.Formula)[ps.Offset : ps.Offset+2])
  528. }
  529. return ""
  530. }
// currentChar returns the character at the current scan position. Callers
// must ensure Offset is in range (see Parser.EOF); an out-of-range Offset
// panics.
func (ps *Parser) currentChar() string {
	return string([]rune(ps.Formula)[ps.Offset])
}
  535. // nextChar provides function to get the next character of the current position.
  536. func (ps *Parser) nextChar() string {
  537. if len([]rune(ps.Formula)) >= ps.Offset+2 {
  538. return string([]rune(ps.Formula)[ps.Offset+1 : ps.Offset+2])
  539. }
  540. return ""
  541. }
// EOF reports whether the scan position has reached the end of the formula
// (measured in runes, matching currentChar's indexing).
func (ps *Parser) EOF() bool {
	return ps.Offset >= len([]rune(ps.Formula))
}
// Parse tokenizes the given formula and returns the token stream (list).
// The result is also retained on ps.Tokens for PrettyPrint and Render.
func (ps *Parser) Parse(formula string) []Token {
	ps.Formula = formula
	ps.Tokens = ps.getTokens()
	return ps.Tokens.Items
}
  552. // PrettyPrint provides function to pretty the parsed result with the indented
  553. // format.
  554. func (ps *Parser) PrettyPrint() string {
  555. indent := 0
  556. output := ""
  557. for _, t := range ps.Tokens.Items {
  558. if t.TSubType == TokenSubTypeStop {
  559. indent--
  560. }
  561. for i := 0; i < indent; i++ {
  562. output += "\t"
  563. }
  564. output += t.TValue + " <" + t.TType + "> <" + t.TSubType + ">" + "\n"
  565. if t.TSubType == TokenSubTypeStart {
  566. indent++
  567. }
  568. }
  569. return output
  570. }
  571. // Render provides function to get formatted formula after parsed.
  572. func (ps *Parser) Render() string {
  573. output := ""
  574. for _, t := range ps.Tokens.Items {
  575. if t.TType == TokenTypeFunction && t.TSubType == TokenSubTypeStart {
  576. output += t.TValue + ParenOpen
  577. } else if t.TType == TokenTypeFunction && t.TSubType == TokenSubTypeStop {
  578. output += ParenClose
  579. } else if t.TType == TokenTypeSubexpression && t.TSubType == TokenSubTypeStart {
  580. output += ParenOpen
  581. } else if t.TType == TokenTypeSubexpression && t.TSubType == TokenSubTypeStop {
  582. output += ParenClose
  583. } else if t.TType == TokenTypeOperand && t.TSubType == TokenSubTypeText {
  584. output += QuoteDouble + t.TValue + QuoteDouble
  585. } else if t.TType == TokenTypeOperatorInfix && t.TSubType == TokenSubTypeIntersection {
  586. output += Whitespace
  587. } else {
  588. output += t.TValue
  589. }
  590. }
  591. return output
  592. }
  593. // inStrSlice provides a method to check if an element is present in an array,
  594. // and return the index of its location, otherwise return -1.
  595. func inStrSlice(a []string, x string) int {
  596. for idx, n := range a {
  597. if x == n {
  598. return idx
  599. }
  600. }
  601. return -1
  602. }