<
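  # base10decode: parse a string of decimal digits into the integer it denotes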
  <
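    # the ten digit strings; a digit's position in this array is its numeric value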
    [ /0 /1 /2 /3 /4 /5 /6 /7 /8 /9 ] ==digits
    { 0 ==result
      { "(.)(.*)" regex } {
        { streq }_ digits -01 index result 10 mul add =result
      } loop
      result
    }
  > -- /base10decode deff
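
  # rxparse: run a regex matcher against the string on the stack;
  # on a match call the handler on the capture groups, otherwise restore the original string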
  { /f deff /re deff _ /s defv re { f } { s } ? * } /rxparse deff
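
  # tokenize: expects a source string and the three token handlers TOKINT, TOKSTR, TOKID;
  # returns an array of token objects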
  { ==TOKID ==TOKSTR ==TOKINT
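    # append a trailing space so even the last token is followed by whitespace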
" " cat
    { < /handle deff /value defv > } /token deff
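    # main loop: while input remains, match one token; the unread rest stays on top, finished tokens collect into the array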
    [ -01 { _ "" streq not } {
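      # parse: try one rule unless an earlier rule already matched this round; a hit sets matched and runs the handler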
      0 /matched defv { /f deff matched { -- } { { 1 =matched f } rxparse } ? * } /parse deff
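      # skip a single space; a '#' comment discards the rest of the input; a run of digits becomes an integer token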
{ "^ (.*)" regex } { } parse
{ "^#" regex } { "" } parse
{ "^(\\d+) +(.*)" regex } { TOKINT token -01 } parse
{ "^\"(.*)" regex } {
"" /str defv
{ _ "^\"(.*)" regex { -01 -- 0 } { 1 } ? * } {
0 /strmatched defv { /f deff strmatched { -- } { { 1 =strmatched f } rxparse } ? * } /strparse deff
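          # escapes: \\ \n \r \0 \" ; ordinary characters are copied verbatim, an unknown escape aborts below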
{ "^\\\\\\\\(.*)" regex } { str "\\" cat =str } strparse
{ "^\\\\n(.*)" regex } { str "\n" cat =str } strparse
{ "^\\\\r(.*)" regex } { str "\r" cat =str } strparse
{ "^\\\\0(.*)" regex } { str "\0" cat =str } strparse
{ "^\\\\\"(.*)" regex } { str "\"" cat =str } strparse
{ "^([^\"\\\\])(.*)" regex } { str -01 cat =str } strparse
strmatched not { "Tokenization of string-like failed" die } rep
} loop
str TOKSTR token -01
} parse
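      # an operator glued to a word (e.g. /name or ==name) is split: the word becomes a string token
      # and the operator is pushed back into the input to be tokenized on the next round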
{ "^([^a-zA-Z0-9 ]+)([a-zA-Z0-9][^ ]*) +(.*)" regex } { -201 TOKSTR token " " -1203 cat cat } parse
{ "^([a-zA-Z0-9]+|[^a-zA-Z0-9 ]+) +(.*)" regex } { TOKID token -01 } parse
matched not { "Tokenization failed: " -01 cat die } rep
} loop -- ]
} /tokenize deff
> /elymas defv
# vim: syn=elymas