diff --git a/frontend/components.d.ts b/frontend/components.d.ts
index 157a331..167553d 100644
--- a/frontend/components.d.ts
+++ b/frontend/components.d.ts
@@ -100,6 +100,7 @@ declare module 'vue' {
ElRow: typeof import('element-plus/es')['ElRow']
ElScrollbar: typeof import('element-plus/es')['ElScrollbar']
ElSelect: typeof import('element-plus/es')['ElSelect']
+ ElSelectV2: typeof import('element-plus/es')['ElSelectV2']
ElSlider: typeof import('element-plus/es')['ElSlider']
ElSpace: typeof import('element-plus/es')['ElSpace']
ElSwitch: typeof import('element-plus/es')['ElSwitch']
diff --git a/godo/ai/convert/doc.go b/godo/ai/convert/doc.go
deleted file mode 100644
index 17b0770..0000000
--- a/godo/ai/convert/doc.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "bytes"
- "io"
-
- "godo/ai/convert/doc"
-)
-
-// ConvertDoc converts an MS Word .doc to text.
-func ConvertDoc(r io.Reader) (string, error) {
-
- buf, err := doc.ParseDoc(r)
-
- if err != nil {
- return "", err
- }
-
- return buf.(*bytes.Buffer).String(), nil
-}
diff --git a/godo/ai/convert/doc/clx.go b/godo/ai/convert/doc/clx.go
deleted file mode 100644
index 2b40a47..0000000
--- a/godo/ai/convert/doc/clx.go
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package doc
-
-import (
- "encoding/binary"
- "errors"
-
- "github.com/richardlehane/mscfb"
-)
-
-var (
- errInvalidPrc = errors.New("Invalid Prc structure")
- errInvalidClx = errors.New("expected last aCP value to equal fib.cpLength (2.8.35)")
- errInvalidPcdt = errors.New("expected clxt to be equal 0x02")
-)
-
-type clx struct {
- pcdt pcdt
-}
-
-type pcdt struct {
- lcb int
- PlcPcd plcPcd
-}
-
-type plcPcd struct {
- aCP []int
- aPcd []pcd
-}
-
-type pcd struct {
- fc fcCompressed
-}
-
-type fcCompressed struct {
- fc int
- fCompressed bool
-}
-
-// read Clx (section 2.9.38)
-func getClx(table *mscfb.File, fib *fib) (*clx, error) {
- if table == nil || fib == nil {
- return nil, errInvalidArgument
- }
- b, err := readClx(table, fib)
- if err != nil {
- return nil, err
- }
-
- pcdtOffset, err := getPrcArrayEnd(b)
- if err != nil {
- return nil, err
- }
-
- pcdt, err := getPcdt(b, pcdtOffset)
- if err != nil {
- return nil, err
- }
-
- if pcdt.PlcPcd.aCP[len(pcdt.PlcPcd.aCP)-1] != fib.fibRgLw.cpLength {
- return nil, errInvalidClx
- }
-
- return &clx{pcdt: *pcdt}, nil
-}
-
-func readClx(table *mscfb.File, fib *fib) ([]byte, error) {
- b := make([]byte, fib.fibRgFcLcb.lcbClx)
- _, err := table.ReadAt(b, int64(fib.fibRgFcLcb.fcClx))
- if err != nil {
- return nil, err
- }
- return b, nil
-}
-
-// read Pcdt from Clx (section 2.9.178)
-func getPcdt(clx []byte, pcdtOffset int) (*pcdt, error) {
- const pcdSize = 8
- if clx[pcdtOffset] != 0x02 { // clxt must be 0x02 or invalid
- return nil, errInvalidPcdt
- }
- lcb := int(binary.LittleEndian.Uint32(clx[pcdtOffset+1 : pcdtOffset+5])) // skip clxt, get lcb
- plcPcdOffset := pcdtOffset + 5 // skip clxt and lcb
- numPcds := (lcb - 4) / (4 + pcdSize) // see 2.2.2 in the spec for equation
- numCps := numPcds + 1 // always 1 more cp than pcds
-
- cps := make([]int, numCps)
- for i := 0; i < numCps; i++ {
- cpOffset := plcPcdOffset + i*4
- cps[i] = int(binary.LittleEndian.Uint32(clx[cpOffset : cpOffset+4]))
- }
-
- pcdStart := plcPcdOffset + 4*numCps
- pcds := make([]pcd, numPcds)
- for i := 0; i < numPcds; i++ {
- pcdOffset := pcdStart + i*pcdSize
- pcds[i] = *parsePcd(clx[pcdOffset : pcdOffset+pcdSize])
- }
- return &pcdt{lcb: lcb, PlcPcd: plcPcd{aCP: cps, aPcd: pcds}}, nil
-}
-
-// find end of RgPrc array (section 2.9.38)
-func getPrcArrayEnd(clx []byte) (int, error) {
- prcOffset := 0
- count := 0
- for {
- clxt := clx[prcOffset]
- if clxt != 0x01 { // this is not a Prc, so exit
- return prcOffset, nil
- }
- prcDataCbGrpprl := binary.LittleEndian.Uint16(clx[prcOffset+1 : prcOffset+3]) // skip the clxt and read 2 bytes
- prcOffset += 1 + 2 + int(prcDataCbGrpprl) // skip clxt, cbGrpprl, and GrpPrl
-
- if count > 10000 || prcDataCbGrpprl <= 0 || prcOffset+3 > len(clx) { // ensure no infinite loop
- return 0, errInvalidPrc
- }
- count++
- }
-}
-
-// parse Pcd (section 2.9.177)
-func parsePcd(pcdData []byte) *pcd {
- return &pcd{fc: *parseFcCompressed(pcdData[2:6])}
-}
-
-// parse FcCompressed (section 2.9.73)
-func parseFcCompressed(fcData []byte) *fcCompressed {
- fCompressed := fcData[3]&64 == 64         // check fCompressed value (second bit from leftmost of the last byte in fcData)
- fcData[3] = fcData[3] & 63 // clear the fcompressed value from data
- fc := binary.LittleEndian.Uint32(fcData) // word doc generally uses little endian order (1.3.7)
- return &fcCompressed{fc: int(fc), fCompressed: fCompressed}
-}
diff --git a/godo/ai/convert/doc/doc.go b/godo/ai/convert/doc/doc.go
deleted file mode 100644
index be77862..0000000
--- a/godo/ai/convert/doc/doc.go
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package doc
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "io"
- "unicode/utf16"
-
- "github.com/mattetti/filebuffer"
- "github.com/richardlehane/mscfb"
-)
-
-var (
- errTable = errors.New("cannot find table stream")
- errDocEmpty = errors.New("WordDocument not found")
- errDocShort = errors.New("wordDoc block too short")
- errInvalidArgument = errors.New("invalid table and/or fib")
-)
-
-type allReader interface {
- io.Closer
- io.ReaderAt
- io.ReadSeeker
-}
-
-func wrapError(e error) error {
- return errors.New("Error processing file: " + e.Error())
-}
-
-// ParseDoc converts a standard io.Reader from a Microsoft Word
-// .doc binary file and returns a reader (actually a bytes.Buffer)
-// which will output the plain text found in the .doc file
-func ParseDoc(r io.Reader) (io.Reader, error) {
- ra, ok := r.(io.ReaderAt)
- if !ok {
- ra, _, err := toMemoryBuffer(r)
- if err != nil {
- return nil, wrapError(err)
- }
- defer ra.Close()
- }
-
- d, err := mscfb.New(ra)
- if err != nil {
- return nil, wrapError(err)
- }
-
- wordDoc, table0, table1 := getWordDocAndTables(d)
- fib, err := getFib(wordDoc)
- if err != nil {
- return nil, wrapError(err)
- }
-
- table := getActiveTable(table0, table1, fib)
- if table == nil {
- return nil, wrapError(errTable)
- }
-
- clx, err := getClx(table, fib)
- if err != nil {
- return nil, wrapError(err)
- }
-
- return getText(wordDoc, clx)
-}
-
-func toMemoryBuffer(r io.Reader) (allReader, int64, error) {
- var b bytes.Buffer
- size, err := b.ReadFrom(r)
- if err != nil {
- return nil, 0, err
- }
- fb := filebuffer.New(b.Bytes())
- return fb, size, nil
-}
-
-func getText(wordDoc *mscfb.File, clx *clx) (io.Reader, error) {
- //var buf bytes.Buffer
- var buf utf16Buffer
- for i := 0; i < len(clx.pcdt.PlcPcd.aPcd); i++ {
- pcd := clx.pcdt.PlcPcd.aPcd[i]
- cp := clx.pcdt.PlcPcd.aCP[i]
- cpNext := clx.pcdt.PlcPcd.aCP[i+1]
-
- //var start, end, size int
- var start, end int
- if pcd.fc.fCompressed {
- //size = 1
- start = pcd.fc.fc / 2
- end = start + cpNext - cp
- } else {
- //size = 2
- start = pcd.fc.fc
- end = start + 2*(cpNext-cp)
- }
-
- b := make([]byte, end-start)
- //_, err := wordDoc.ReadAt(b, int64(start/size)) // read all the characters
- _, err := wordDoc.ReadAt(b, int64(start))
- if err != nil {
- return nil, err
- }
- translateText(b, &buf, pcd.fc.fCompressed)
- }
- //return &buf, nil
- runes := utf16.Decode(buf.Chars())
-
- var out bytes.Buffer
- out.Grow(len(runes))
- for _, r := range runes {
- if r == 7 { // table column separator
- r = ' '
- } else if r < 32 && r != 9 && r != 10 && r != 13 { // skip non-printable ASCII characters
- continue
- }
- out.WriteRune(r)
- }
-
- return &out, nil
-}
-
-func translateText(b []byte, buf *utf16Buffer, fCompressed bool) {
- fieldLevel := 0
- var isFieldChar bool
- for cIndex := range b {
- // Handle special field characters (section 2.8.25)
- if b[cIndex] == 0x13 {
- isFieldChar = true
- fieldLevel++
- continue
- } else if b[cIndex] == 0x14 {
- isFieldChar = false
- continue
- } else if b[cIndex] == 0x15 {
- isFieldChar = false
- continue
- } else if isFieldChar {
- continue
- }
-
- // if b[cIndex] == 7 { // table column separator
- // buf.WriteByte(' ')
- // continue
- // } else if b[cIndex] < 32 && b[cIndex] != 9 && b[cIndex] != 10 && b[cIndex] != 13 { // skip non-printable ASCII characters
- // //buf.Write([]byte(fmt.Sprintf("|%#x|", b[cIndex])))
- // continue
- // }
-
- if fCompressed { // compressed, so replace compressed characters
- buf.Write(replaceCompressed(b[cIndex]))
- } else {
- //buf.Write(b)
- buf.WriteByte(b[cIndex])
- }
- }
-}
-
-func replaceCompressed(char byte) []byte {
- var v uint16
- switch char {
- case 0x82:
- v = 0x201A
- case 0x83:
- v = 0x0192
- case 0x84:
- v = 0x201E
- case 0x85:
- v = 0x2026
- case 0x86:
- v = 0x2020
- case 0x87:
- v = 0x2021
- case 0x88:
- v = 0x02C6
- case 0x89:
- v = 0x2030
- case 0x8A:
- v = 0x0160
- case 0x8B:
- v = 0x2039
- case 0x8C:
- v = 0x0152
- case 0x91:
- v = 0x2018
- case 0x92:
- v = 0x2019
- case 0x93:
- v = 0x201C
- case 0x94:
- v = 0x201D
- case 0x95:
- v = 0x2022
- case 0x96:
- v = 0x2013
- case 0x97:
- v = 0x2014
- case 0x98:
- v = 0x02DC
- case 0x99:
- v = 0x2122
- case 0x9A:
- v = 0x0161
- case 0x9B:
- v = 0x203A
- case 0x9C:
- v = 0x0153
- case 0x9F:
- v = 0x0178
- default:
- //return []byte{char}
- return []byte{char, 0x00}
- }
- out := make([]byte, 2)
- binary.LittleEndian.PutUint16(out, v)
- return out
-}
-
-func getWordDocAndTables(r *mscfb.Reader) (*mscfb.File, *mscfb.File, *mscfb.File) {
- var wordDoc, table0, table1 *mscfb.File
- for i := 0; i < len(r.File); i++ {
- stream := r.File[i]
-
- switch stream.Name {
- case "WordDocument":
- wordDoc = stream
- case "0Table":
- table0 = stream
- case "1Table":
- table1 = stream
- }
- }
- return wordDoc, table0, table1
-}
-
-func getActiveTable(table0 *mscfb.File, table1 *mscfb.File, f *fib) *mscfb.File {
- if f.base.fWhichTblStm == 0 {
- return table0
- }
- return table1
-}
diff --git a/godo/ai/convert/doc/fib.go b/godo/ai/convert/doc/fib.go
deleted file mode 100644
index cc87853..0000000
--- a/godo/ai/convert/doc/fib.go
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package doc
-
-import (
- "encoding/binary"
- "errors"
-
- "github.com/richardlehane/mscfb"
-)
-
-var (
- errFibInvalid = errors.New("file information block validation failed")
-)
-
-type fib struct {
- base fibBase
- csw int
- fibRgW fibRgW
- cslw int
- fibRgLw fibRgLw
- cbRgFcLcb int
- fibRgFcLcb fibRgFcLcb
-}
-
-type fibBase struct {
- fWhichTblStm int
-}
-
-type fibRgW struct {
-}
-
-type fibRgLw struct {
- ccpText int
- ccpFtn int
- ccpHdd int
- ccpMcr int
- ccpAtn int
- ccpEdn int
- ccpTxbx int
- ccpHdrTxbx int
- cpLength int
-}
-
-type fibRgFcLcb struct {
- fcPlcfFldMom int
- lcbPlcfFldMom int
- fcPlcfFldHdr int
- lcbPlcfFldHdr int
- fcPlcfFldFtn int
- lcbPlcfFldFtn int
- fcPlcfFldAtn int
- lcbPlcfFldAtn int
- fcClx int
- lcbClx int
-}
-
-// parse File Information Block (section 2.5.1)
-func getFib(wordDoc *mscfb.File) (*fib, error) {
- if wordDoc == nil {
- return nil, errDocEmpty
- }
-
- b := make([]byte, 898) // get FIB block up to FibRgFcLcb97
- _, err := wordDoc.ReadAt(b, 0)
- if err != nil {
- return nil, err
- }
-
- fibBase := getFibBase(b[0:32])
-
- fibRgW, csw, err := getFibRgW(b, 32)
- if err != nil {
- return nil, err
- }
-
- fibRgLw, cslw, err := getFibRgLw(b, 34+csw)
- if err != nil {
- return nil, err
- }
-
- fibRgFcLcb, cbRgFcLcb, err := getFibRgFcLcb(b, 34+csw+2+cslw)
-
- return &fib{base: *fibBase, csw: csw, cslw: cslw, fibRgW: *fibRgW, fibRgLw: *fibRgLw, fibRgFcLcb: *fibRgFcLcb, cbRgFcLcb: cbRgFcLcb}, err
-}
-
-// parse FibBase (section 2.5.2)
-func getFibBase(fib []byte) *fibBase {
- byt := fib[11] // fWhichTblStm is 2nd highest bit in this byte
- fWhichTblStm := int(byt >> 1 & 1) // set which table (0Table or 1Table) is the table stream
- return &fibBase{fWhichTblStm: fWhichTblStm}
-}
-
-func getFibRgW(fib []byte, start int) (*fibRgW, int, error) {
- if start+2 >= len(fib) { // must be big enough for csw
- return &fibRgW{}, 0, errFibInvalid
- }
-
- csw := int(binary.LittleEndian.Uint16(fib[start:start+2])) * 2 // in bytes
- return &fibRgW{}, csw, nil
-}
-
-// parse FibRgLw (section 2.5.4)
-func getFibRgLw(fib []byte, start int) (*fibRgLw, int, error) {
- fibRgLwStart := start + 2 // skip cslw
- if fibRgLwStart+88 >= len(fib) { // expect 88 bytes in fibRgLw
- return &fibRgLw{}, 0, errFibInvalid
- }
-
- cslw := getInt16(fib, start) * 4 // in bytes
- ccpText := getInt(fib, fibRgLwStart+3*4)
- ccpFtn := getInt(fib, fibRgLwStart+4*4)
- ccpHdd := getInt(fib, fibRgLwStart+5*4)
- ccpMcr := getInt(fib, fibRgLwStart+6*4)
- ccpAtn := getInt(fib, fibRgLwStart+7*4)
- ccpEdn := getInt(fib, fibRgLwStart+8*4)
- ccpTxbx := getInt(fib, fibRgLwStart+9*4)
- ccpHdrTxbx := getInt(fib, fibRgLwStart+10*4)
-
- // calculate cpLength. Used in PlcPcd verification (see section 2.8.35)
- var cpLength int
- if ccpFtn != 0 || ccpHdd != 0 || ccpMcr != 0 || ccpAtn != 0 || ccpEdn != 0 || ccpTxbx != 0 || ccpHdrTxbx != 0 {
- cpLength = ccpFtn + ccpHdd + ccpMcr + ccpAtn + ccpEdn + ccpTxbx + ccpHdrTxbx + ccpText + 1
- } else {
- cpLength = ccpText
- }
- return &fibRgLw{ccpText: ccpText, ccpFtn: ccpFtn, ccpHdd: ccpHdd, ccpMcr: ccpMcr, ccpAtn: ccpAtn,
- ccpEdn: ccpEdn, ccpTxbx: ccpTxbx, ccpHdrTxbx: ccpHdrTxbx, cpLength: cpLength}, cslw, nil
-}
-
-// parse FibRgFcLcb (section 2.5.5)
-func getFibRgFcLcb(fib []byte, start int) (*fibRgFcLcb, int, error) {
- fibRgFcLcbStart := start + 2 // skip cbRgFcLcb
- if fibRgFcLcbStart+186*4 < len(fib) { // expect 186+ values in FibRgFcLcb
- return &fibRgFcLcb{}, 0, errFibInvalid
- }
-
- cbRgFcLcb := getInt16(fib, start)
- fcPlcfFldMom := getInt(fib, fibRgFcLcbStart+32*4)
- lcbPlcfFldMom := getInt(fib, fibRgFcLcbStart+33*4)
- fcPlcfFldHdr := getInt(fib, fibRgFcLcbStart+34*4)
- lcbPlcfFldHdr := getInt(fib, fibRgFcLcbStart+35*4)
- fcPlcfFldFtn := getInt(fib, fibRgFcLcbStart+36*4)
- lcbPlcfFldFtn := getInt(fib, fibRgFcLcbStart+37*4)
- fcPlcfFldAtn := getInt(fib, fibRgFcLcbStart+38*4)
- lcbPlcfFldAtn := getInt(fib, fibRgFcLcbStart+39*4)
- fcClx := getInt(fib, fibRgFcLcbStart+66*4)
- lcbClx := getInt(fib, fibRgFcLcbStart+67*4)
- return &fibRgFcLcb{fcPlcfFldMom: fcPlcfFldMom, lcbPlcfFldMom: lcbPlcfFldMom, fcPlcfFldHdr: fcPlcfFldHdr, lcbPlcfFldHdr: lcbPlcfFldHdr,
- fcPlcfFldFtn: fcPlcfFldFtn, lcbPlcfFldFtn: lcbPlcfFldFtn, fcPlcfFldAtn: fcPlcfFldAtn, lcbPlcfFldAtn: lcbPlcfFldAtn,
- fcClx: fcClx, lcbClx: lcbClx}, cbRgFcLcb, nil
-}
-
-func getInt16(buf []byte, start int) int {
- return int(binary.LittleEndian.Uint16(buf[start : start+2]))
-}
-func getInt(buf []byte, start int) int {
- return int(binary.LittleEndian.Uint32(buf[start : start+4]))
-}
diff --git a/godo/ai/convert/doc/plcFld.go b/godo/ai/convert/doc/plcFld.go
deleted file mode 100644
index 774e735..0000000
--- a/godo/ai/convert/doc/plcFld.go
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package doc
-
-/* I don't think I'm going to need this
-type plcFld struct {
- aCp []int
- aFld []fld
-}
-
-type fld struct {
- fldch int
- grffld int
- fieldtype string
- fNested bool
- fHasSep bool
-}
-
-func getPlcFld(table *mscfb.File, offset, size int) (*plcFld, error) {
- if table == nil {
- return nil, errInvalidArgument
- }
- b := make([]byte, size)
- _, err := table.ReadAt(b, int64(offset))
- if err != nil {
- return nil, err
- }
-
- f, err := getFld(b)
- if err != nil {
- return nil, err
- }
-
- return f, nil
-}
-
-func getFld(plc []byte) (*plcFld, error) {
- return nil, nil
-}
-
-func getFieldType(grffld byte) string {
- switch grffld {
- case 0x01:
- return "Not Named"
- case 0x02:
- return "Not Named"
- case 0x03:
- return "REF"
- case 0x05:
- return "FTNREF"
- case 0x06:
- return "SET"
- case 0x07:
- return "IF"
- case 0x08:
- return "INDEX"
- case 0x0A:
- return "STYLEREF"
- case 0x0C:
- return "SEQ"
- case 0x0D:
- return "TOC"
- case 0x0E:
- return "INFO"
- case 0x0F:
- return "TITLE"
- case 0x10:
- return "SUBJECT"
- case 0x11:
- return "AUTHOR"
- case 0x12:
- return "KEYWORDS"
- case 0x13:
- return "COMMENTS"
- case 0x14:
- return "LASTSAVEDBY"
- case 0x15:
- return "CREATEDATE"
- case 0x16:
- return "SAVEDATE"
- case 0x17:
- return "PRINTDATE"
- case 0x18:
- return "REVNUM"
- case 0x19:
- return "EDITTIME"
- case 0x1A:
- return "NUMPAGES"
- case 0x1B:
- return "NUMWORDS"
- case 0x1C:
- return "NUMCHARS"
- case 0x1D:
- return "FILENAME"
- case 0x1E:
- return "TEMPLATE"
- case 0x1F:
- return "DATE"
- case 0x20:
- return "TIME"
- case 0x21:
- return "PAGE"
- case 0x22:
- return "="
- case 0x23:
- return "QUOTE"
- case 0x24:
- return "INCLUDE"
- case 0x25:
- return "PAGEREF"
- case 0x26:
- return "ASK"
- case 0x27:
- return "FILLIN"
- case 0x28:
- return "DATA"
- case 0x29:
- return "NEXT"
- case 0x2A:
- return "NEXTIF"
- case 0x2B:
- return "SKIPIF"
- case 0x2C:
- return "MERGEREC"
- case 0x2D:
- return "DDE"
- case 0x2E:
- return "DDEAUTO"
- case 0x2F:
- return "GLOSSARY"
- case 0x30:
- return "PRINT"
- case 0x31:
- return "EQ"
- case 0x32:
- return "GOTOBUTTON"
- case 0x33:
- return "MACROBUTTON"
- case 0x34:
- return "AUTONUMOUT"
- case 0x35:
- return "AUTONUMLGL"
- case 0x36:
- return "AUTONUM"
- case 0x37:
- return "IMPORT"
- case 0x38:
- return "LINK"
- case 0x39:
- return "SYMBOL"
- case 0x3A:
- return "EMBED"
- case 0x3B:
- return "MERGEFIELD"
- case 0x3C:
- return "USERNAME"
- case 0x3D:
- return "USERINITIALS"
- case 0x3E:
- return "USERADDRESS"
- case 0x3F:
- return "BARCODE"
- case 0x40:
- return "DOCVARIABLE"
- case 0x41:
- return "SECTION"
- case 0x42:
- return "SECTIONPAGES"
- case 0x43:
- return "INCLUDEPICTURE"
- case 0x44:
- return "INCLUDETEXT"
- case 0x45:
- return "FILESIZE"
- case 0x46:
- return "FORMTEXT"
- case 0x47:
- return "FORMCHECKBOX"
- case 0x48:
- return "NOTEREF"
- case 0x49:
- return "TOA"
- case 0x4B:
- return "MERGESEQ"
- case 0x4F:
- return "AUTOTEXT"
- case 0x50:
- return "COMPARE"
- case 0x51:
- return "ADDIN"
- case 0x53:
- return "FORMDROPDOWN"
- case 0x54:
- return "ADVANCE"
- case 0x55:
- return "DOCPROPERTY"
- case 0x57:
- return "CONTROL"
- case 0x58:
- return "HYPERLINK"
- case 0x59:
- return "AUTOTEXTLIST"
- case 0x5A:
- return "LISTNUM"
- case 0x5B:
- return "HTMLCONTROL"
- case 0x5C:
- return "BIDIOUTLINE"
- case 0x5D:
- return "ADDRESSBLOCK"
- case 0x5E:
- return "GREETINGLINE"
- case 0x5F:
- return "SHAPE"
- default:
- return "UNKNOWN"
- }
-}
-*/
diff --git a/godo/ai/convert/doc/utf16.go b/godo/ai/convert/doc/utf16.go
deleted file mode 100644
index bfb6e81..0000000
--- a/godo/ai/convert/doc/utf16.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package doc
-
-import (
- "encoding/binary"
-)
-
-type utf16Buffer struct {
- haveReadLowerByte bool
- char [2]byte
- data []uint16
-}
-
-func (buf *utf16Buffer) Write(p []byte) (n int, err error) {
- for i := range p {
- buf.WriteByte(p[i])
- }
- return len(p), nil
-}
-
-func (buf *utf16Buffer) WriteByte(b byte) error {
- if buf.haveReadLowerByte {
- buf.char[1] = b
- buf.data = append(buf.data, binary.LittleEndian.Uint16(buf.char[:]))
- } else {
- buf.char[0] = b
- }
- buf.haveReadLowerByte = !buf.haveReadLowerByte
- return nil
-}
-
-func (buf *utf16Buffer) Chars() []uint16 {
- if buf.haveReadLowerByte {
- return append(buf.data, uint16(buf.char[0]))
- }
- return buf.data
-}
diff --git a/godo/ai/convert/docx.go b/godo/ai/convert/docx.go
deleted file mode 100644
index 0546144..0000000
--- a/godo/ai/convert/docx.go
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "archive/zip"
- "bytes"
- "encoding/xml"
- "fmt"
- "io"
- "os"
- "time"
-)
-
-type typeOverride struct {
- XMLName xml.Name `xml:"Override"`
- ContentType string `xml:"ContentType,attr"`
- PartName string `xml:"PartName,attr"`
-}
-
-type contentTypeDefinition struct {
- XMLName xml.Name `xml:"Types"`
- Overrides []typeOverride `xml:"Override"`
-}
-
-// ConvertDocx converts an MS Word docx file to text.
-func ConvertDocx(r io.Reader) (string, error) {
- var size int64
-
- // Common case: if the reader is a file (or trivial wrapper), avoid
- // loading it all into memory.
- var ra io.ReaderAt
- if f, ok := r.(interface {
- io.ReaderAt
- Stat() (os.FileInfo, error)
- }); ok {
- si, err := f.Stat()
- if err != nil {
- return "", err
- }
- size = si.Size()
- ra = f
- } else {
- b, err := io.ReadAll(io.LimitReader(r, maxBytes))
- if err != nil {
- return "", fmt.Errorf("error read data: %v", err)
- }
- size = int64(len(b))
- ra = bytes.NewReader(b)
- }
-
- zr, err := zip.NewReader(ra, size)
- if err != nil {
- return "", fmt.Errorf("error unzipping data: %v", err)
- }
-
- zipFiles := mapZipFiles(zr.File)
-
- contentTypeDefinition, err := getContentTypeDefinition(zipFiles["[Content_Types].xml"])
- if err != nil {
- return "", err
- }
-
- meta := make(map[string]string)
- var textHeader, textBody, textFooter string
- for _, override := range contentTypeDefinition.Overrides {
- f := zipFiles[override.PartName]
-
- switch {
- case override.ContentType == "application/vnd.openxmlformats-package.core-properties+xml":
- rc, err := f.Open()
- if err != nil {
- return "", fmt.Errorf("error opening '%v' from archive: %v", f.Name, err)
- }
- defer rc.Close()
-
- meta, err = XMLToMap(rc)
- if err != nil {
- return "", fmt.Errorf("error parsing '%v': %v", f.Name, err)
- }
-
- if tmp, ok := meta["modified"]; ok {
- if t, err := time.Parse(time.RFC3339, tmp); err == nil {
- meta["ModifiedDate"] = fmt.Sprintf("%d", t.Unix())
- }
- }
- if tmp, ok := meta["created"]; ok {
- if t, err := time.Parse(time.RFC3339, tmp); err == nil {
- meta["CreatedDate"] = fmt.Sprintf("%d", t.Unix())
- }
- }
- case override.ContentType == "application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml":
- body, err := parseDocxText(f)
- if err != nil {
- return "", err
- }
- textBody += body + "\n"
- case override.ContentType == "application/vnd.openxmlformats-officedocument.wordprocessingml.footer+xml":
- footer, err := parseDocxText(f)
- if err != nil {
- return "", err
- }
- textFooter += footer + "\n"
- case override.ContentType == "application/vnd.openxmlformats-officedocument.wordprocessingml.header+xml":
- header, err := parseDocxText(f)
- if err != nil {
- return "", err
- }
- textHeader += header + "\n"
- }
-
- }
- // After the ZIP archive has been parsed successfully, run the image extraction logic
- images, err := findImagesInZip(zr)
- if err != nil {
- fmt.Printf("Error extracting images: %v", err)
- }
- fmt.Printf("Images: %v", images)
-
- return textHeader + "\n" + textBody + "\n" + textFooter, nil
-}
-
-func getContentTypeDefinition(zf *zip.File) (*contentTypeDefinition, error) {
- f, err := zf.Open()
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- x := &contentTypeDefinition{}
- if err := xml.NewDecoder(io.LimitReader(f, maxBytes)).Decode(x); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-func mapZipFiles(files []*zip.File) map[string]*zip.File {
- filesMap := make(map[string]*zip.File, 2*len(files))
- for _, f := range files {
- filesMap[f.Name] = f
- filesMap["/"+f.Name] = f
- }
- return filesMap
-}
-
-func parseDocxText(f *zip.File) (string, error) {
- r, err := f.Open()
- if err != nil {
- return "", fmt.Errorf("error opening '%v' from archive: %v", f.Name, err)
- }
- defer r.Close()
-
- text, err := DocxXMLToText(r)
- if err != nil {
- return "", fmt.Errorf("error parsing '%v': %v", f.Name, err)
- }
- return text, nil
-}
-
-// DocxXMLToText converts Docx XML into plain text.
-func DocxXMLToText(r io.Reader) (string, error) {
- return XMLToText(r, []string{"br", "p", "tab"}, []string{"instrText", "script"}, true)
-}
diff --git a/godo/ai/convert/find.go b/godo/ai/convert/find.go
deleted file mode 100644
index 50a43e0..0000000
--- a/godo/ai/convert/find.go
+++ /dev/null
@@ -1,336 +0,0 @@
-package convert
-
-import (
- "archive/zip"
- "encoding/xml"
- "fmt"
- "io"
- "log"
- "os"
- "path/filepath"
- "strings"
-
- "godo/libs"
-)
-
-// Common image file extensions
-var imageExtensions = []string{".jpg", ".jpeg", ".png", ".gif", ".bmp", ".webp", ".tif", ".tiff"}
-
-// Walk the ZIP archive directly and collect files that look like images
-func findImagesInZip(zr *zip.Reader) ([]string, error) {
- var images []string
- cacheDir := libs.GetCacheDir()
-
- for _, f := range zr.File {
- if isImageFile(f.Name) {
- images = append(images, f.Name)
- if err := extractImageToCache(zr, f.Name, cacheDir); err != nil {
- log.Printf("Error extracting image %s to cache: %v", f.Name, err)
- }
- }
- }
-
- return images, nil
-}
-
-// Report whether the file name refers to an image file
-func isImageFile(fileName string) bool {
- ext := strings.ToLower(filepath.Ext(fileName))
- for _, imgExt := range imageExtensions {
- if ext == imgExt {
- return true
- }
- }
- return false
-}
-
-// Extract an image from the ZIP archive into the cache directory
-func extractImageToCache(zr *zip.Reader, imageName, cacheDir string) error {
- fileInZip, err := getFileByName(zr.File, imageName)
- if err != nil {
- return err
- }
-
- rc, err := fileInZip.Open()
- if err != nil {
- return fmt.Errorf("failed to open file %s in zip: %w", imageName, err)
- }
- defer rc.Close()
-
- justFileName := filepath.Base(imageName)
- outFilePath := filepath.Join(cacheDir, justFileName)
-
- outFile, err := os.Create(outFilePath)
- if err != nil {
- return fmt.Errorf("failed to create file %s: %w", outFilePath, err)
- }
- defer outFile.Close()
-
- _, err = io.Copy(outFile, rc)
- if err != nil {
- return fmt.Errorf("failed to copy image content to %s: %w", outFilePath, err)
- }
-
- // Extract the text content surrounding the image
- textContent, err := getSurroundingTextForOffice(zr, imageName)
- if err != nil {
- log.Printf("Error getting surrounding text for image %s: %v", imageName, err)
- } else {
- textFilePath := filepath.Join(cacheDir, strings.TrimSuffix(justFileName, filepath.Ext(justFileName))+".txt")
- if err := saveTextToFile(textContent, textFilePath); err != nil {
- log.Printf("Error saving text to file %s: %v", textFilePath, err)
- }
- }
-
- return nil
-}
-
-// Find and return the file with the given name from the zip.File slice
-func getFileByName(files []*zip.File, name string) (*zip.File, error) {
- for _, file := range files {
- if file.Name == name {
- return file, nil
- }
- }
- return nil, fmt.Errorf("file %s not found in zip archive", name)
-}
-
-// Get the text content surrounding an image in a .pptx, .xlsx or .docx file
-func getSurroundingTextForOffice(zr *zip.Reader, imageName string) (string, error) {
- imageDir := filepath.Dir(imageName)
- xmlFiles, err := findRelevantXMLFiles(zr, imageDir)
- if err != nil {
- return "", err
- }
-
- for _, xmlFile := range xmlFiles {
- fileInZip, err := getFileByName(zr.File, xmlFile)
- if err != nil {
- continue
- }
-
- rc, err := fileInZip.Open()
- if err != nil {
- continue
- }
- defer rc.Close()
-
- doc, err := parseXMLDocument(rc, imageDir)
- if err != nil {
- continue
- }
-
- surroundingText := getSurroundingText(doc, filepath.Base(imageName))
- if surroundingText != "" {
- return truncateText(surroundingText), nil
- }
- }
-
- return "", fmt.Errorf("no surrounding text found for image %s", imageName)
-}
-
-// Find the relevant XML files
-func findRelevantXMLFiles(zr *zip.Reader, imageDir string) ([]string, error) {
- switch {
- case strings.Contains(imageDir, "ppt/media"):
- return findFilesByPattern(zr, "ppt/slides/slide*.xml"), nil
- case strings.Contains(imageDir, "xl/media"):
- return findFilesByPattern(zr, "xl/worksheets/sheet*.xml"), nil
- case strings.Contains(imageDir, "word/media"):
- return []string{"word/document.xml"}, nil
- default:
- return nil, fmt.Errorf("unknown image directory %s", imageDir)
- }
-}
-
-// Parse the XML document
-func parseXMLDocument(rc io.ReadCloser, imageDir string) (interface{}, error) {
- var doc interface{}
- switch {
- case strings.Contains(imageDir, "ppt/media"):
- doc = &PPTXDocument{}
- case strings.Contains(imageDir, "xl/media"):
- doc = &XLSXDocument{}
- case strings.Contains(imageDir, "word/media"):
- doc = &DOCXDocument{}
- default:
- return nil, fmt.Errorf("unknown image directory %s", imageDir)
- }
-
- if err := xml.NewDecoder(rc).Decode(doc); err != nil {
- return nil, err
- }
-
- return doc, nil
-}
-
-// Get the text content surrounding the image
-func getSurroundingText(doc interface{}, imagePath string) string {
- switch d := doc.(type) {
- case *PPTXDocument:
- for _, slide := range d.Slides {
- for _, shape := range slide.Shapes {
- if shape.Type == "pic" && shape.ImagePath == imagePath {
- return getTextFromSlide(slide)
- }
- }
- }
- case *XLSXDocument:
- for _, sheet := range d.Sheets {
- for _, drawing := range sheet.Drawings {
- for _, image := range drawing.Images {
- if image.ImagePath == imagePath {
- return getTextFromSheet(sheet)
- }
- }
- }
- }
- case *DOCXDocument:
- for _, paragraph := range d.Body.Paragraphs {
- for _, run := range paragraph.Runs {
- for _, pic := range run.Pictures {
- if pic.ImagePath == imagePath {
- return getTextFromParagraph(paragraph)
- }
- }
- }
- }
- }
- return ""
-}
-
-// Find files matching the pattern
-func findFilesByPattern(zr *zip.Reader, pattern string) []string {
- var files []string
- for _, f := range zr.File {
- if matched, _ := filepath.Match(pattern, f.Name); matched {
- files = append(files, f.Name)
- }
- }
- return files
-}
-
-// Save the text content to a file
-func saveTextToFile(text, filePath string) error {
- return os.WriteFile(filePath, []byte(text), 0644)
-}
-
-// Truncate the text, making sure it does not exceed 80 characters
-func truncateText(text string) string {
- if len(text) > 80 {
- return text[:80]
- }
- return text
-}
-
-// PPTXDocument struct definition
-type PPTXDocument struct {
- Slides []Slide `xml:"p:sld"`
-}
-
-type Slide struct {
- Shapes []Shape `xml:"p:cSld>p:spTree>p:sp"`
-}
-
-type Shape struct {
- Type string `xml:"p:pic"`
- ImagePath string `xml:"p:pic>p:blipFill>a:blip/@r:embed"`
- Elements []Element `xml:"p:txBody>a:p>a:r"`
-}
-
-type Element struct {
- Type string `xml:"a:t"`
- Value string `xml:",chardata"`
-}
-
-// XLSXDocument struct definition
-type XLSXDocument struct {
- Sheets []Sheet `xml:"worksheet"`
-}
-
-type Sheet struct {
- Rows []Row `xml:"sheetData>row"`
- Drawings []Drawing `xml:"drawing"`
-}
-
-type Row struct {
- Cells []Cell `xml:"c"`
-}
-
-type Cell struct {
- Value string `xml:"v"`
-}
-
-type Drawing struct {
- Images []Image `xml:"xdr:pic"`
-}
-
-type Image struct {
- ImagePath string `xml:"xdr:pic>xdr:blipFill>a:blip/@r:embed"`
-}
-
-// DOCXDocument struct definition
-type DOCXDocument struct {
- Body struct {
- Paragraphs []Paragraph `xml:"w:p"`
- } `xml:"w:body"`
-}
-
-type Paragraph struct {
- Runs []Run `xml:"w:r"`
-}
-
-type Run struct {
- Pictures []Picture `xml:"w:drawing"`
- Text []Text `xml:"w:t"`
-}
-
-type Text struct {
- Value string `xml:",chardata"`
-}
-
-type Picture struct {
- ImagePath string `xml:"wp:docPr/@name"`
-}
-
-// Extract text from a slide
-func getTextFromSlide(slide Slide) string {
- var text string
- for _, shape := range slide.Shapes {
- if shape.Type != "pic" {
- text += getTextFromShape(shape)
- }
- }
- return text
-}
-
-// Extract text from a shape
-func getTextFromShape(shape Shape) string {
- var text string
- for _, element := range shape.Elements {
- text += element.Value
- }
- return text
-}
-
-// Extract text from a worksheet
-func getTextFromSheet(sheet Sheet) string {
- var text string
- for _, row := range sheet.Rows {
- for _, cell := range row.Cells {
- text += cell.Value
- }
- }
- return text
-}
-
-// Extract text from a paragraph
-func getTextFromParagraph(paragraph Paragraph) string {
- var text string
- for _, run := range paragraph.Runs {
- for _, t := range run.Text {
- text += t.Value
- }
- }
- return text
-}
diff --git a/godo/ai/convert/http.go b/godo/ai/convert/http.go
deleted file mode 100644
index 1295876..0000000
--- a/godo/ai/convert/http.go
+++ /dev/null
@@ -1,381 +0,0 @@
-// /*
-// - GodoOS - A lightweight cloud desktop
-// - Copyright (C) 2024 https://godoos.com
-// *
-// - This program is free software: you can redistribute it and/or modify
-// - it under the terms of the GNU Lesser General Public License as published by
-// - the Free Software Foundation, either version 2.1 of the License, or
-// - (at your option) any later version.
-// *
-// - This program is distributed in the hope that it will be useful,
-// - but WITHOUT ANY WARRANTY; without even the implied warranty of
-// - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// - GNU Lesser General Public License for more details.
-// *
-// - You should have received a copy of the GNU Lesser General Public License
-// - along with this program. If not, see .
-// */
-package convert
-
-// import (
-// "bytes"
-// "encoding/json"
-// "fmt"
-// "godo/libs"
-// "io"
-// "log"
-// "mime"
-// "net/http"
-// "os"
-// "path/filepath"
-// "strconv"
-// "strings"
-// "time"
-// )
-
-// // UploadInfo describes the information for an uploaded file
-// type UploadInfo struct {
-// Name string `json:"name"`
-// SavePath string `json:"save_path"`
-// Content string `json:"content"`
-// CreatedAt time.Time `json:"created_at"`
-// }
-
-// // SaveContentToFile saves the content to a file and returns an UploadInfo struct
-// func SaveContentToFile(content, fileName string) (UploadInfo, error) {
-// uploadBaseDir, err := libs.GetUploadDir()
-// if err != nil {
-// return UploadInfo{}, err
-// }
-
-// // Strip spaces from the file name
-// fileNameWithoutSpaces := strings.ReplaceAll(fileName, " ", "_")
-// fileNameWithoutSpaces = strings.ReplaceAll(fileNameWithoutSpaces, "/", "")
-// fileNameWithoutSpaces = strings.ReplaceAll(fileNameWithoutSpaces, `\`, "")
-// // Extract the file name and extension
-// // Find the position of the last dot
-// lastDotIndex := strings.LastIndexByte(fileNameWithoutSpaces, '.')
-
-// // If a dot is found, extract the extension; otherwise treat the name as having no extension
-// ext := ""
-// if lastDotIndex != -1 {
-// ext = fileNameWithoutSpaces[lastDotIndex:]
-// fileNameWithoutSpaces = fileNameWithoutSpaces[:lastDotIndex]
-// } else {
-// ext = ""
-// }
-// randFileName := fmt.Sprintf("%s_%s%s", fileNameWithoutSpaces, strconv.FormatInt(time.Now().UnixNano(), 10), ext)
-// savePath := filepath.Join(uploadBaseDir, time.Now().Format("2006-01-02"), randFileName)
-
-// if err := os.MkdirAll(filepath.Dir(savePath), 0755); err != nil {
-// return UploadInfo{}, err
-// }
-
-// if err := os.WriteFile(savePath, []byte(content), 0644); err != nil {
-// return UploadInfo{}, err
-// }
-
-// return UploadInfo{
-// Name: fileNameWithoutSpaces,
-// SavePath: savePath,
-// //Content: content,
-// CreatedAt: time.Now(),
-// }, nil
-// }
-
-// // MultiUploadHandler handles multi-file upload requests
-// func MultiUploadHandler(w http.ResponseWriter, r *http.Request) {
-// if err := r.ParseMultipartForm(10000 << 20); err != nil {
-// libs.Error(w, "Failed to parse multipart form")
-// return
-// }
-
-// files := r.MultipartForm.File["files"]
-// if len(files) == 0 {
-// libs.Error(w, "No file parts in the request")
-// return
-// }
-
-// fileInfoList := make([]UploadInfo, 0, len(files))
-
-// for _, fileHeader := range files {
-// file, err := fileHeader.Open()
-// if err != nil {
-// libs.Error(w, "Failed to open uploaded file")
-// continue
-// }
-// defer file.Close()
-
-// content, err := io.ReadAll(file)
-// if err != nil {
-// libs.Error(w, "Failed to read uploaded file")
-// continue
-// }
-
-// //log.Printf(string(content))
-// // Save the uploaded file content
-// info, err := SaveContentToFile(string(content), fileHeader.Filename)
-// if err != nil {
-// libs.Error(w, "Failed to save uploaded file")
-// continue
-// }
-// log.Println(info.SavePath)
-// // Convert the uploaded file
-// convertData := Convert(info.SavePath) // Assuming convert.Convert expects a file path
-// log.Printf("convertData: %v", convertData)
-// if convertData.Data == "" {
-// continue
-// }
-// images := []ImagesInfo{}
-// resInfo := ResContentInfo{
-// Content: convertData.Data,
-// Images: images,
-// }
-// // Write the converted data to a file
-// savePath := info.SavePath + "_result.json"
-// // if err := WriteConvertedDataToFile(convertData.Data, savePath); err != nil {
-// // serv.Err("Failed to write converted data to file", w)
-// // continue
-// // }
-// // Use json.MarshalIndent to get the content directly as a byte slice
-// contents, err := json.MarshalIndent(resInfo, "", " ")
-// if err != nil {
-// libs.Error(w, "failed to marshal reqBodies to JSON:"+savePath)
-// continue
-// }
-// // Write the byte slice straight to the file
-// if err := os.WriteFile(savePath, contents, 0644); err != nil {
-// libs.Error(w, "failed to write to file:"+savePath)
-// continue
-// }
-
-// //info.SavePath = savePath
-// fileInfoList = append(fileInfoList, info)
-// }
-
-// libs.Success(w, fileInfoList, "success")
-// }
-
-// // WriteConvertedDataToFile writes the converted data to a file
-// func WriteConvertedDataToFile(data, filePath string) error {
-// file, err := os.Create(filePath)
-// if err != nil {
-// return err
-// }
-// defer file.Close()
-
-// _, err = file.WriteString(data)
-// if err != nil {
-// return err
-// }
-
-// fmt.Printf("Successfully wrote %d bytes to file %s.\n", len(data), filePath)
-// return nil
-// }
-
-// // jsonParamHandler handles JSON parameter requests
-// func JsonParamHandler(w http.ResponseWriter, r *http.Request) {
-// type RequestBody struct {
-// Path string `json:"path"`
-// }
-
-// var requestBody RequestBody
-// if err := json.NewDecoder(r.Body).Decode(&requestBody); err != nil {
-// libs.Error(w, "Invalid request body")
-// return
-// }
-
-// path := requestBody.Path
-// fmt.Printf("Parameter 'path' from JSON is: %s\n", path)
-
-// if path != "" {
-// resp := Convert(path)
-// w.Header().Set("Content-Type", "application/json")
-// if err := json.NewEncoder(w).Encode(resp); err != nil {
-// libs.Error(w, "Error encoding JSON")
-// return
-// }
-// return
-// }
-// }
-
-// // HandleURLPost receives a POST request containing a URL parameter, then processes and saves the content the URL points to
-// func HandleURLPost(w http.ResponseWriter, r *http.Request) {
-// var requestBody struct {
-// URL string `json:"url"`
-// }
-
-// decoder := json.NewDecoder(r.Body)
-// if err := decoder.Decode(&requestBody); err != nil {
-// libs.Error(w, "Invalid request body")
-// return
-// }
-// resp, err := http.Get(requestBody.URL)
-// if err != nil {
-// libs.Error(w, "Invalid request url:"+requestBody.URL)
-// return
-// }
-// defer resp.Body.Close()
-
-// body, errRead := io.ReadAll(resp.Body)
-// if errRead != nil {
-// libs.Error(w, "Invalid request body")
-// return
-// }
-// reader := bytes.NewReader(body)
-// res, err := ConvertHTML(reader)
-// if err != nil {
-// libs.Error(w, "Failed to convert content")
-// return
-// }
-// log.Printf("Converted content: %s", res)
-// // Save the content to a file with the generic SaveContentToFile function
-// //fileName := "converted_from_url"
-// // Use the first line of the content as the title
-// fileName := strings.SplitN(res, "\n", 2)[0]
-// if fileName == "" {
-// fileName = "未命名网页"
-// }
-// fileName = fileName + ".html"
-// info, err := SaveContentToFile(res, fileName)
-// if err != nil {
-// libs.Error(w, "Failed to save converted content to file")
-// return
-// }
-// // Write the converted data to a file
-// savePath := info.SavePath + "_result.json"
-// // if err := WriteConvertedDataToFile(info.Content, savePath); err != nil {
-// // serv.Err("Failed to write converted data to file", w)
-// // return
-// // }
-// // Use json.MarshalIndent to get the content directly as a byte slice
-// resInfo := ResContentInfo{
-// Content: info.Content,
-// }
-// contents, err := json.MarshalIndent(resInfo, "", " ")
-// if err != nil {
-// libs.Error(w, "failed to marshal reqBodies to JSON:"+savePath)
-// return
-// }
-// // Write the byte slice straight to the file
-// if err := os.WriteFile(savePath, contents, 0644); err != nil {
-// libs.Error(w, "failed to write to file:"+savePath)
-// return
-// }
-// w.Header().Set("Content-Type", "application/json")
-// if err := json.NewEncoder(w).Encode(info); err != nil {
-// libs.Error(w, "Error encoding JSON")
-// return
-// }
-// }
-// func ShowDetailHandler(w http.ResponseWriter, r *http.Request) {
-// // Get the file path from the URL query parameters
-// filePath := r.URL.Query().Get("path")
-// //log.Printf("imagePath: %s", imagePath)
-// // Check whether the file path is empty or invalid
-// if filePath == "" {
-// libs.Error(w, "Invalid file path")
-// return
-// }
-// var reqBodies ResContentInfo
-
-// if libs.PathExists(filePath + "_result.json") {
-// //log.Printf("ShowDetailHandler: %s", filePath)
-// filePath = filePath + "_result.json"
-// content, err := os.ReadFile(filePath)
-// if err != nil {
-// libs.Error(w, "Failed to open file")
-// return
-// }
-// err = json.Unmarshal(content, &reqBodies)
-// if err != nil {
-// libs.Error(w, "Failed to read file")
-// return
-// }
-// // Set the response headers
-// w.Header().Set("Content-Type", "text/plain; charset=utf-8")
-// resContent := reqBodies.Content + "/n"
-// for _, image := range reqBodies.Images {
-// resContent += image.Content + "/n"
-// }
-// // Write the response body
-// _, err = w.Write([]byte(resContent))
-// if err != nil {
-// libs.Error(w, "Failed to write response")
-// return
-// }
-// } else {
-// // Make sure the image path is an absolute path
-// absImagePath, err := filepath.Abs(filePath)
-// //log.Printf("absImagePath: %s", absImagePath)
-// if err != nil {
-// libs.Error(w, err.Error())
-// return
-// }
-
-// // Get the file's MIME type
-// mimeType := mime.TypeByExtension(filepath.Ext(absImagePath))
-// if mimeType == "" {
-// mimeType = "application/octet-stream" // fall back to the default binary stream type if it cannot be determined
-// }
-
-// // Set the MIME type on the response header
-// w.Header().Set("Content-Type", mimeType)
-
-// // Open the file and read its contents
-// file, err := os.Open(absImagePath)
-// if err != nil {
-// libs.Error(w, err.Error())
-// return
-// }
-// defer file.Close()
-
-// // Write the file contents to the response body
-// _, err = io.Copy(w, file)
-// if err != nil {
-// libs.Error(w, err.Error())
-// }
-// }
-
-// }
-// func ServeImage(w http.ResponseWriter, r *http.Request) {
-// // Get the image path from the URL query parameters
-// imagePath := r.URL.Query().Get("path")
-// //log.Printf("imagePath: %s", imagePath)
-// // Check whether the image path is empty or invalid
-// if imagePath == "" {
-// libs.Error(w, "Invalid image path")
-// return
-// }
-
-// // Make sure the image path is an absolute path
-// absImagePath, err := filepath.Abs(imagePath)
-// //log.Printf("absImagePath: %s", absImagePath)
-// if err != nil {
-// libs.Error(w, err.Error())
-// return
-// }
-
-// // Get the file's MIME type
-// mimeType := mime.TypeByExtension(filepath.Ext(absImagePath))
-// if mimeType == "" {
-// mimeType = "application/octet-stream" // fall back to the default binary stream type if it cannot be determined
-// }
-
-// // Set the MIME type on the response header
-// w.Header().Set("Content-Type", mimeType)
-
-// // Open the file and read its contents
-// file, err := os.Open(absImagePath)
-// if err != nil {
-// libs.Error(w, err.Error())
-// return
-// }
-// defer file.Close()
-
-// // Write the file contents to the response body
-// _, err = io.Copy(w, file)
-// if err != nil {
-// libs.Error(w, err.Error())
-// }
-// }
diff --git a/godo/ai/convert/image.go b/godo/ai/convert/image.go
deleted file mode 100644
index f607b1b..0000000
--- a/godo/ai/convert/image.go
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "io"
-
- "godo/ai/convert/libs"
-)
-
-func ConvertImage(r io.Reader) (string, error) {
- // Get the absolute path of the temporary file
- absFilePath, tmpfile, err := libs.GetTempFile(r, "prefix-image")
- if err != nil {
- return "", err
- }
- paths := []string{absFilePath}
- // Recognize the text
- output, err := libs.RunRapid(paths)
- if err != nil {
- return "", err
- }
- libs.CloseTempFile(tmpfile)
- // resultString, err := libs.ExtractText(output)
- // if err != nil {
- // return "", err
- // }
- // fmt.Println(resultString)
- return output, nil
-}
diff --git a/godo/ai/convert/img.go b/godo/ai/convert/img.go
deleted file mode 100644
index 0410a01..0000000
--- a/godo/ai/convert/img.go
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "crypto/md5"
- "fmt"
- lib "godo/ai/convert/libs"
- "godo/libs"
- "io"
- "log"
- "os"
- "path/filepath"
-)
-
-type ResContentInfo struct {
- Content string `json:"content"`
- Images []ImagesInfo `json:"image"`
-}
-type ImagesInfo struct {
- Path string `json:"path"`
- Content string `json:"content"`
-}
-
-// Compute the MD5 hash of a file
-func calculateFileHash(filePath string) (string, error) {
- file, err := os.Open(filePath)
- if err != nil {
- return "", err
- }
- defer file.Close()
-
- hasher := md5.New()
- if _, err := io.Copy(hasher, file); err != nil {
- return "", err
- }
-
- return fmt.Sprintf("%x", hasher.Sum(nil)), nil
-}
-
-// Copy images, checking size and MD5 first
-func CopyImages(destDir string) ([]ImagesInfo, error) {
- copiedFiles := []ImagesInfo{}
- srcDir, err := libs.GetTrueCacheDir()
- if !libs.PathExists(srcDir) {
- return copiedFiles, fmt.Errorf("source directory does not exist: %s", srcDir)
- }
- if err != nil {
- return copiedFiles, fmt.Errorf("failed to create temporary cache directory: %w", err)
- }
- if !libs.PathExists(destDir) {
- if err := os.MkdirAll(destDir, 0755); err != nil {
- return copiedFiles, fmt.Errorf("failed to create destination directory: %w", err)
- }
- }
- err = filepath.Walk(srcDir, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !info.IsDir() {
- ext := filepath.Ext(path)
- if isImageExtension(ext) {
- destPath := filepath.Join(destDir, info.Name())
-
- // Check whether the destination file already exists with the same size
- if fileInfo, err := os.Stat(destPath); err == nil {
- if fileInfo.Size() == info.Size() {
- // File sizes match, so compare MD5 hashes as well
- srcHash, err := calculateFileHash(path)
- if err != nil {
- log.Printf("Error calculating source hash for %s: %v", path, err)
- return err
- }
- destHash, err := calculateFileHash(destPath)
- if err != nil {
- log.Printf("Error calculating destination hash for %s: %v", destPath, err)
- return err
- }
- if srcHash == destHash {
- fmt.Printf("Skipping %s because a file with the same size and content already exists.\n", path)
- return nil
- }
- }
- }
- paths := []string{path}
- content, err := lib.RunRapid(paths)
- if err != nil {
- content = ""
- }
- // Copy the file
- if err := copyImagesFile(path, destPath); err != nil {
- return err
- }
-
- copiedFiles = append(copiedFiles, ImagesInfo{Path: destPath, Content: content}) // record the path of the successfully copied file
- fmt.Printf("Copied %s to %s\n", path, destPath)
- }
- }
- return nil
- })
- defer func() {
- os.RemoveAll(srcDir)
- }()
- if len(copiedFiles) < 1 {
- os.RemoveAll(destDir)
- }
- if err != nil {
- return copiedFiles, err
- }
- return copiedFiles, nil
-}
-
-// Helper that checks whether a file extension is an image extension
-func isImageExtension(ext string) bool {
- switch ext {
- case ".jpg", ".jpeg", ".jpe", ".jfif", ".jfif-tbnl", ".png", ".gif", ".bmp", ".webp", ".tif", ".tiff":
- return true
- default:
- return false
- }
-}
-func isConvertImageFile(ext string) bool {
- switch ext {
- case ".docx", ".pdf", ".pptx", ".odt":
- return true
- default:
- return false
- }
-}
-
-// Copy a single file
-func copyImagesFile(src, dst string) error {
- in, err := os.ReadFile(src)
- if err != nil {
- return err
- }
- return os.WriteFile(dst, in, 0644)
-}
diff --git a/godo/ai/convert/libs/file.go b/godo/ai/convert/libs/file.go
deleted file mode 100644
index 79e6be2..0000000
--- a/godo/ai/convert/libs/file.go
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package libs
-
-import (
- "errors"
- "fmt"
- "godo/libs"
- "io"
- "log"
- "os"
- "path/filepath"
- "runtime"
-)
-
-func getXpdfDir(exename string) (string, error) {
- convertDir, err := getConvertDir()
- if err != nil {
- log.Fatal(err)
- return "", err
- }
- var path string
- if runtime.GOOS == "windows" {
- path = filepath.Join(convertDir, "pdf", exename+".exe")
- } else {
- path = filepath.Join(convertDir, "pdf", exename)
- }
- if libs.PathExists(path) {
- return path, nil
- } else {
- return "", errors.New("pdf convert exe not found")
- }
-}
-func getRapidDir() (string, error) {
- convertDir, err := getConvertDir()
- if err != nil {
- log.Fatal(err)
- return "", err
- }
- var path string
- if runtime.GOOS == "windows" {
- path = filepath.Join(convertDir, "rapid", "RapidOcrOnnx.exe")
- } else {
- path = filepath.Join(convertDir, "rapid", "RapidOcrOnnx")
- }
- if libs.PathExists(path) {
- return path, nil
- } else {
- return "", errors.New("RapidOcrOnnx not found")
- }
-}
-
-func getRapidModelDir() (string, error) {
- convertDir, err := getConvertDir()
- if err != nil {
- log.Fatal(err)
- return "", err
- }
- path := filepath.Join(convertDir, "rapid", "models")
- if libs.PathExists(path) {
- return path, nil
- } else {
- return "", errors.New("RapidOcrOnnx model not found")
- }
-}
-func getConvertDir() (string, error) {
- runDir, err := libs.GetAiRunDir()
- if err != nil {
- return "", fmt.Errorf("failed to get user home directory: %w", err)
- }
- return filepath.Join(runDir, "goconv"), nil
-}
-
-func GetTempDir(pathname string) (string, error) {
- tempDir, err := os.MkdirTemp("", pathname)
- if err != nil {
- log.Println("Failed to create temporary directory:", err)
- return "./", err
- }
-
- log.Println("Temporary directory created:", tempDir)
- // defer func() {
- // os.RemoveAll(tempDir)
- // }()
- return tempDir, nil
-}
-func GetTempFile(r io.Reader, prename string) (string, *os.File, error) {
- // Create a temporary file
- tmpfile, err := os.CreateTemp("", prename)
-
- if err != nil {
- return "", tmpfile, err
- }
-
- // Write the reader's contents to the temporary file
- if _, err := io.Copy(tmpfile, r); err != nil {
- return "", tmpfile, err
- }
-
- // Get the absolute path of the temporary file
- absFilePath, err := filepath.Abs(tmpfile.Name())
- if err != nil {
- return "", tmpfile, err
- }
- return absFilePath, tmpfile, nil
-}
-func CloseTempFile(tmpfile *os.File) {
- defer func() {
- _ = tmpfile.Close()
- _ = os.Remove(tmpfile.Name()) // delete the temporary file here if it is no longer needed
- }()
-}
diff --git a/godo/ai/convert/libs/kind.go b/godo/ai/convert/libs/kind.go
deleted file mode 100644
index d7aa972..0000000
--- a/godo/ai/convert/libs/kind.go
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
-Type definitions for markdown elements.
-*/
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package libs
-
-import "fmt"
-
-//go:generate stringer -type=Kind
-type Kind int
-
-//go:generate stringer -type=ElementType
-type ElementType int
-
-// specific types
-const (
- // block types
- Head Kind = iota
- Paragraph
- List
- QuoteBlock
- CodeBlock
- Rule
- // inline types
- Emphasis
- Strong
- Link
- Code
- Image
-)
-
-// element types
-const (
- Block ElementType = iota
- Inline
-)
-
-const _Kind_name = "HeadParagraphListQuoteBlockCodeBlockRuleEmphasisStrongLinkCodeImage"
-
-var _Kind_index = [...]uint8{4, 13, 17, 27, 36, 40, 48, 54, 58, 62, 67}
-
-func (i Kind) String() string {
- if i < 0 || i >= Kind(len(_Kind_index)) {
- return fmt.Sprintf("Kind(%d)", i)
- }
- hi := _Kind_index[i]
- lo := uint8(0)
- if i > 0 {
- lo = _Kind_index[i-1]
- }
- return _Kind_name[lo:hi]
-}
-
-const _ElementType_name = "BlockInline"
-
-var _ElementType_index = [...]uint8{5, 11}
-
-func (i ElementType) String() string {
- if i < 0 || i >= ElementType(len(_ElementType_index)) {
- return fmt.Sprintf("ElementType(%d)", i)
- }
- hi := _ElementType_index[i]
- lo := uint8(0)
- if i > 0 {
- lo = _ElementType_index[i-1]
- }
- return _ElementType_name[lo:hi]
-}
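
The hand-rolled String methods above reproduce what `stringer` would generate: each name is sliced out of one concatenated string using cumulative offsets. A small sketch of the resulting output (same package assumed):

```go
package libs

import "fmt"

func exampleKindStrings() {
	fmt.Println(Paragraph) // "Paragraph" (_Kind_name[4:13])
	fmt.Println(Image)     // "Image"     (_Kind_name[62:67])
	fmt.Println(Kind(99))  // "Kind(99)"  (out-of-range fallback)
	fmt.Println(Inline)    // "Inline"    (_ElementType_name[5:11])
}
```
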
diff --git a/godo/ai/convert/libs/rapid.go b/godo/ai/convert/libs/rapid.go
deleted file mode 100644
index 59d187e..0000000
--- a/godo/ai/convert/libs/rapid.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package libs
-
-import (
- "bytes"
- _ "embed" // Needed for go:embed
- "fmt"
- "log"
- "os/exec"
- "regexp"
- "runtime"
- "strings"
-)
-
-/*
-*
-./RapidOcrOnnx --models models \
---det ch_PP-OCRv4_det_infer-v7.onnx \
---rec ch_PP-OCRv4_rec_infer-v7.onnx \
---cls ch_ppocr_mobile_v2.0_cls_infer.onnx \
---keys ppocr_keys_v1.txt \
---image $TARGET_IMG \
---numThread $NUM_THREADS \
---padding 50 \
---maxSideLen 1024 \
---boxScoreThresh 0.5 \
---boxThresh 0.3 \
---unClipRatio 1.6 \
---doAngle 1 \
---mostAngle 1 \
---GPU $GPU_INDEX
-*/
-func RunRapid(imagePaths []string) (string, error) {
-
- results := make([]string, 0, len(imagePaths))
- //log.Printf("the image paths are- %v\n", imagePaths)
- runFile, err := getRapidDir()
- if err != nil {
- return "", err
- }
-
- modelDir, err := getRapidModelDir()
- if err != nil {
- return "", err
- }
- for _, imagePath := range imagePaths {
- //log.Printf("the image path is- %v\n", imagePath)
- res, err := ConvertImage(runFile, modelDir, imagePath)
- if err != nil {
- log.Printf("- %v\n", err)
- //return "", err
- } else {
- results = append(results, res)
- }
- }
- //res,err := ConvertImage(tmpfile, imagePath)
-
- finalResult := strings.Join(results, "\n")
- return finalResult, nil // per-image failures are logged and skipped above
-}
-
-// func GetImageContent(imagePath string) (string, error) {
-// runFile, err := getRapidDir()
-// if err != nil {
-// return "", err
-// }
-
-// modelDir, err := getRapidModelDir()
-// if err != nil {
-// return "", err
-// }
-// return ConvertImage(runFile, modelDir, imagePath)
-// }
-func ConvertImage(runFile string, modelDir string, imagePath string) (string, error) {
-
- // build the command
- cmdArgs := []string{
- runFile,
- "--models", modelDir,
- "--det", "ch_PP-OCRv4_det_infer-v7.onnx",
- "--rec", "ch_PP-OCRv4_rec_infer-v7.onnx",
- "--cls", "ch_ppocr_mobile_v2.0_cls_infer.onnx",
- "--keys", "ppocr_keys_v1.txt",
- "--image", imagePath,
- "--numThread", fmt.Sprintf("%d", runtime.NumCPU()),
- "--padding", "50",
- "--maxSideLen", "1024",
- "--boxScoreThresh", "0.5",
- "--boxThresh", "0.3",
- "--unClipRatio", "1.6",
- "--doAngle", "1",
- "--mostAngle", "1",
- "--GPU", "-1",
- }
- // print the command line about to be executed
- cmdStr := strings.Join(cmdArgs, " ")
- fmt.Printf("Executing command: %s\n", cmdStr)
- // construct the command with exec.Command
- cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
- var out bytes.Buffer
- var stderr bytes.Buffer
- cmd.Stdout = &out    // capture stdout
- cmd.Stderr = &stderr // capture stderr
-
- // run the command
- err := cmd.Run()
- if err != nil {
-  // log the failure
-  log.Printf("error running command: %v, stderr: %s", err, stderr.String())
- return "", err
- }
- // read the command output
- outputStr := out.String()
- //CloseDll(tmpfile)
- resText, err := ExtractText(outputStr)
- if err != nil {
-  log.Printf("error extracting text: %v", err)
- return "", err
- }
- return resText, err
-}
-
-func ExtractText(output string) (string, error) {
- // locate the "=====End detect=====" marker
- endDetectIndex := strings.Index(output, "=====End detect=====")
- if endDetectIndex == -1 {
- return "", fmt.Errorf("expected '=====End detect=====' not found in output")
- }
-
- // start extracting the text right after "=====End detect====="
- contentStartIndex := endDetectIndex + len("=====End detect=====\n")
- if contentStartIndex >= len(output) {
- return "", fmt.Errorf("unexpected end of output after '=====End detect====='")
- }
-
- // take everything from "=====End detect=====" to the end of the output
- tempContent := output[contentStartIndex:]
-
- // drop the leading "FullDetectTime(...)" timing line and surrounding whitespace
- cleanedContent := strings.TrimSpace(strings.SplitN(tempContent, "\n", 2)[1])
-
- // make sure the remaining noise is stripped
- //cleanedOutput := strings.TrimSuffix(cleanedContent, "}")
- // use regular expressions to remove noise lines and consecutive blank lines
-
- // remove lines that contain only ?, B, > or :, together with the whitespace around them
- re := regexp.MustCompile(`(?m)^\s*(?:\?|\s*B|>|:)\s*$`) // (?m) makes ^ and $ match at each line's start and end
- cleanedOutput := re.ReplaceAllString(cleanedContent, "") // drop those lines
- // this expression matches one or more consecutive newlines
- re = regexp.MustCompile(`\n\s*\n`)
- cleanedOutput = re.ReplaceAllString(cleanedOutput, "\n") // collapse runs of blank lines into a single newline
- // return the extracted text
- return cleanedOutput, nil
-}
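
A sketch of what ExtractText does with the OCR binary's stdout. The sample string only mimics the shape implied by the parsing code above (an end-of-detect marker, one timing line that gets skipped, then the recognized text); it is not real RapidOcrOnnx output:

```go
package libs

import "fmt"

func exampleExtractText() {
	sample := "boxes and scores...\n=====End detect=====\nFullDetectTime(1024.5ms)\nhello\nworld\n"
	text, err := ExtractText(sample)
	if err != nil {
		fmt.Println("extract failed:", err)
		return
	}
	fmt.Println(text) // "hello\nworld"
}
```
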
diff --git a/godo/ai/convert/libs/xpdf.go b/godo/ai/convert/libs/xpdf.go
deleted file mode 100644
index 90f7592..0000000
--- a/godo/ai/convert/libs/xpdf.go
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package libs
-
-import (
- "bytes"
- "errors"
- "fmt"
- "godo/libs"
- "log"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
-)
-
-func RunXpdf(pdfPath string) (string, error) {
- tempDir, err := GetTempDir("xpdf-dirs")
- if err != nil {
- return "", err
- }
- tempDirSlash := tempDir
- if !strings.HasSuffix(tempDir, string(filepath.Separator)) { // check whether the path already ends with a separator
-  tempDirSlash = tempDir + string(filepath.Separator) // append one if it does not
- }
- runFile, err := getXpdfDir("pdftopng")
- if err != nil {
- return "", err
- }
- // build the command
- cmdArgs := []string{
- runFile,
- "-mono",
- pdfPath,
- tempDirSlash,
- }
- // print the command line about to be executed
- cmdStr := strings.Join(cmdArgs, " ")
- log.Printf("Executing command: %s\n", cmdStr)
- // construct the command with exec.Command
- cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
- //var out bytes.Buffer
- var stderr bytes.Buffer
- //cmd.Stdout = &out // capture stdout
- cmd.Stderr = &stderr // capture stderr
- // run the command
- err = cmd.Run()
- if err != nil {
-  // log the failure
-  log.Printf("error running command: %v, stderr: %s", err, stderr.String())
- return "", err
- }
- // command output (left commented out below, useful for debugging)
- // outputStr := out.String()
- // log.Printf("Output command: %s\n", outputStr)
- err = GetImages(pdfPath)
- if err != nil {
- log.Println("Failed to get images:", err)
- return "", err
- }
-
- dir, err := os.ReadDir(tempDir)
- if err != nil {
- log.Println("Failed to read directory:", err)
- return "", err
- }
- imagePaths := []string{}
- for _, entry := range dir {
- absPath := filepath.Join(tempDir, entry.Name())
- //log.Println(absPath)
- imagePaths = append(imagePaths, absPath)
- }
- //log.Printf("imagePaths: %v\n", imagePaths)
- if len(imagePaths) < 1 {
- return "", errors.New("no images found")
- }
- text, err := RunRapid(imagePaths)
- if err != nil {
- log.Println("Failed to run rapid:", err)
- return "", err
- }
-
- defer func() {
-
- if err := os.RemoveAll(tempDir); err != nil {
- log.Printf("Error removing temp dir: %s", err)
- }
- }()
- // go func(pdfPath string) {
-
- // }(pdfPath)
- return text, nil
-}
-func GetImages(pdfPath string) error {
- cacheDir := libs.GetCacheDir()
- tempDirSlash := cacheDir
- if !strings.HasSuffix(cacheDir, string(filepath.Separator)) { // check whether the path already ends with a separator
-  tempDirSlash = cacheDir + string(filepath.Separator) // append one if it does not
- }
- //log.Printf("tempDirSlash: %s\n", tempDirSlash)
- runFile, err := getXpdfDir("pdfimages")
- if err != nil {
- return err
- }
- cmdArgs := []string{
- runFile,
- "-j",
- pdfPath,
- tempDirSlash,
- }
- //log.Printf("Executing command: %s\n", strings.Join(cmdArgs, " "))
- cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
- var stderr bytes.Buffer
- //cmd.Stdout = &out // capture stdout
- cmd.Stderr = &stderr // capture stderr
- if err := cmd.Run(); err != nil {
-  log.Printf("error running command: %v, stderr: %s", err, stderr.String())
- return fmt.Errorf("failed to run pdfimages: %w", err)
- }
- return nil
-}
diff --git a/godo/ai/convert/main.go b/godo/ai/convert/main.go
deleted file mode 100644
index 05715bf..0000000
--- a/godo/ai/convert/main.go
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "fmt"
- "os"
- "path"
- "strings"
-)
-
-const maxBytes = 1024 << 20 // 1GB
-
-type Res struct {
- Status int
- Data string
-}
-
-// Convert converts a file's content to plain text based on its file type.
-// Supported extensions: .doc, .docx, .odt, .pdf, .csv, .xls, .xlsx, .tsv,
-// .pptx, .rtf, .epub, .xml, .xhtml, .html, .htm, .jpg, .jpeg, .jpe, .jfif,
-// .jfif-tbnl, .png, .gif, .bmp, .webp, .tif, .tiff, .txt, .md.
-// If the filename starts with http, it is handed directly to ConvertHttp.
-// Parameters:
-//
-//	filename string - a file name or file URL.
-//
-// Returns:
-//
-//	Res - the conversion status code and the extracted data.
-func Convert(filename string) Res {
- //libs.InitConvertDir()
- // if the filename starts with http, hand it off to ConvertHttp
- if strings.HasPrefix(filename, "http") {
- return ConvertHttp(filename)
- }
- // try to open the file
- r, err := os.Open(filename)
- if err != nil {
-  // opening the file failed; return the error
- return Res{
- Status: 201,
- Data: fmt.Sprintf("error opening file: %v", err),
- }
-
- }
- // make sure the file is closed before the function returns
- defer r.Close()
-
- // get the file extension in lower case
- ext := strings.ToLower(path.Ext(filename))
-
- var body string
- // dispatch to the matching converter based on the extension
- switch ext {
- case ".doc":
- body, err = ConvertDoc(r)
- case ".docx":
- body, err = ConvertDocx(r)
- case ".odt":
- body, err = ConvertODT(r)
- // .pages files are not supported yet
- // case ".pages":
- // return "application/vnd.apple.pages"
- case ".pdf":
- body, err = ConvertPDF(r)
- case ".csv", ".xls", ".xlsx", ".tsv":
- body, err = ConvertXlsx(r)
- case ".pptx":
- body, err = ConvertPptx(r)
- case ".rtf":
- body, err = ConvertRTF(r)
- case ".epub":
- body, err = ConvetEpub(r)
- case ".xml":
- body, err = ConvertXML(r)
- case ".xhtml", ".html", ".htm":
- body, err = ConvertHTML(r)
- case ".jpg", ".jpeg", ".jpe", ".jfif", ".jfif-tbnl", ".png", ".gif", ".bmp", ".webp", ".tif", ".tiff":
- body, err = ConvertImage(r)
- case ".md":
- body, err = ConvertMd(r)
- case ".txt":
- body, err = ConvertTxt(r)
- }
-
- // if the conversion failed, return the error message
- if err != nil {
-  return Res{
-   Status: 204,
-   Data:   fmt.Sprintf("error converting file: %v", err),
- }
- }
- return Res{
- Status: 0,
- Data: body,
- }
-}
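
A minimal usage sketch for the Convert dispatcher above, assuming the package is imported as godo/ai/convert; the path mirrors the fixtures used in main_test.go:

```go
package main

import (
	"fmt"

	"godo/ai/convert"
)

func main() {
	res := convert.Convert("./testdata/test.docx")
	if res.Status != 0 {
		fmt.Println("conversion failed:", res.Data)
		return
	}
	fmt.Println(res.Data) // plain-text content of the document
}
```
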
diff --git a/godo/ai/convert/main_test.go b/godo/ai/convert/main_test.go
deleted file mode 100644
index 4b5fed3..0000000
--- a/godo/ai/convert/main_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "fmt"
- "path/filepath"
- "testing"
-)
-
-func TestConvert(t *testing.T) {
- tempDir := "./testdata"
- cases := []struct {
- name string
- filename string
- expectedRes Res
- }{
- {
- name: "HTTP",
- filename: "https://www.baidu.com",
- expectedRes: Res{Status: 0, Data: ""},
- },
- {
- name: "docx",
- filename: filepath.Join(tempDir, "test.docx"),
- expectedRes: Res{Status: 0, Data: ""},
- },
- {
- name: "xls",
- filename: filepath.Join(tempDir, "test.xls"),
- expectedRes: Res{Status: 0, Data: ""},
- },
- {
- name: "pdf",
- filename: filepath.Join(tempDir, "test.pdf"),
- expectedRes: Res{Status: 0, Data: ""},
- },
- {
- name: "pptx",
- filename: filepath.Join(tempDir, "test.pptx"),
- expectedRes: Res{Status: 0, Data: ""},
- },
- {
- name: "rtf",
- filename: filepath.Join(tempDir, "test.rtf"),
- expectedRes: Res{Status: 0, Data: ""},
- },
- {
- name: "odt",
- filename: filepath.Join(tempDir, "test.odt"),
- expectedRes: Res{Status: 0, Data: ""},
- },
- {
- name: "txt",
- filename: filepath.Join(tempDir, "test.txt"),
- expectedRes: Res{Status: 0, Data: ""},
- },
- {
- name: "md",
- filename: filepath.Join(tempDir, "test.md"),
- expectedRes: Res{Status: 0, Data: ""},
- },
- {
- name: "html",
- filename: filepath.Join(tempDir, "test.html"),
- expectedRes: Res{Status: 0, Data: ""},
- },
- {
- name: "jpg",
- filename: filepath.Join(tempDir, "test.jpg"),
- expectedRes: Res{Status: 0, Data: ""},
- },
- {
- name: "xml",
- filename: filepath.Join(tempDir, "test.xml"),
- expectedRes: Res{Status: 0, Data: ""},
- },
- {
- name: "epub",
- filename: filepath.Join(tempDir, "test.epub"),
- expectedRes: Res{Status: 0, Data: ""},
- },
- }
-
- for _, tc := range cases {
- t.Run(tc.name, func(t *testing.T) {
-
-   // call Convert and check the result
-   res := Convert(tc.filename)
-   fmt.Printf("converted: %v\n", tc.filename)
-   // compare the status codes
-   if res.Status != tc.expectedRes.Status {
-    t.Errorf("For case '%s', expected status %d, got %d", tc.name, tc.expectedRes.Status, res.Status)
-   }
-   // the Data field could be compared as well if needed;
-   // note: adjust the comparison to the actual fixtures; a direct comparison of Data is omitted here
- })
- }
-}
diff --git a/godo/ai/convert/md.go b/godo/ai/convert/md.go
deleted file mode 100644
index 07b31c9..0000000
--- a/godo/ai/convert/md.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "io"
- "regexp"
- "strings"
-)
-
-func ConvertMd(r io.Reader) (string, error) {
- b, err := io.ReadAll(r)
- if err != nil {
- return "", err
- }
- re := regexp.MustCompile(`<[^>]*>`)
- content := re.ReplaceAllString(string(b), "")
- reMarkdown := regexp.MustCompile(`(\*{1,4}|_{1,4}|\#{1,6})`)
- content = reMarkdown.ReplaceAllString(content, "")
- // remove line breaks
- content = strings.ReplaceAll(content, "\r", "")
- content = strings.ReplaceAll(content, "\n", "")
-
- // trim surrounding whitespace
- content = strings.TrimSpace(content)
- return content, nil
-}
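
A quick sketch of what ConvertMd yields: HTML tags, emphasis and heading markers, and newlines are stripped, so the result is a single line of plain text (the input string is purely illustrative):

```go
package convert

import (
	"fmt"
	"strings"
)

func exampleConvertMd() {
	src := "# Title\nSome **bold** text with <b>html</b>.\n"
	out, _ := ConvertMd(strings.NewReader(src))
	fmt.Printf("%q\n", out) // "TitleSome bold text with html."
}
```
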
diff --git a/godo/ai/convert/pdf.go b/godo/ai/convert/pdf.go
deleted file mode 100644
index cb72bd9..0000000
--- a/godo/ai/convert/pdf.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "io"
-
- "godo/ai/convert/libs"
-)
-
-func ConvertPDF(r io.Reader) (string, error) {
- // write the reader to a temp file and get its absolute path
- absFilePath, tmpfile, err := libs.GetTempFile(r, "prefix-pdf")
- if err != nil {
-  return "", err
- }
- // remove the temp file even if the conversion fails
- defer libs.CloseTempFile(tmpfile)
- output, err := libs.RunXpdf(absFilePath)
- if err != nil {
-  return "", err
- }
- return output, nil
-}
diff --git a/godo/ai/convert/pptx.go b/godo/ai/convert/pptx.go
deleted file mode 100644
index 435d383..0000000
--- a/godo/ai/convert/pptx.go
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "archive/zip"
- "bytes"
- "fmt"
- "io"
- "os"
- "strings"
-)
-
-// ConvertPptx converts an MS PowerPoint pptx file to text.
-func ConvertPptx(r io.Reader) (string, error) {
- var size int64
-
- // Common case: if the reader is a file (or trivial wrapper), avoid
- // loading it all into memory.
- var ra io.ReaderAt
- if f, ok := r.(interface {
- io.ReaderAt
- Stat() (os.FileInfo, error)
- }); ok {
- si, err := f.Stat()
- if err != nil {
- return "", err
- }
- size = si.Size()
- ra = f
- } else {
- b, err := io.ReadAll(r)
- if err != nil {
-   return "", err // propagate the read error instead of returning empty text
- }
- size = int64(len(b))
- ra = bytes.NewReader(b)
- }
-
- zr, err := zip.NewReader(ra, size)
- if err != nil {
- return "", fmt.Errorf("could not unzip: %v", err)
- }
-
- zipFiles := mapZipFiles(zr.File)
-
- contentTypeDefinition, err := getContentTypeDefinition(zipFiles["[Content_Types].xml"])
- if err != nil {
- return "", err
- }
-
- var textBody string
- for _, override := range contentTypeDefinition.Overrides {
- f := zipFiles[override.PartName]
-
- switch override.ContentType {
- case "application/vnd.openxmlformats-officedocument.presentationml.slide+xml",
- "application/vnd.openxmlformats-officedocument.drawingml.diagramData+xml":
- body, err := parseDocxText(f)
- if err != nil {
- return "", fmt.Errorf("could not parse pptx: %v", err)
- }
- textBody += body + "\n"
- }
- }
- // after the zip is parsed successfully, extract any embedded images
- images, err := findImagesInZip(zr)
- if err != nil {
- fmt.Printf("Error extracting images: %v", err)
- }
- fmt.Printf("Images: %v", images)
-
- return strings.TrimSuffix(textBody, "\n"), nil
-}
diff --git a/godo/ai/convert/rtf.go b/godo/ai/convert/rtf.go
deleted file mode 100644
index f84cf47..0000000
--- a/godo/ai/convert/rtf.go
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "strconv"
- "strings"
- "time"
-
- "github.com/EndFirstCorp/peekingReader"
-)
-
-type stack struct {
- top *element
- size int
-}
-
-type element struct {
- value string
- next *element
-}
-
-func (s *stack) Len() int {
- return s.size
-}
-
-func (s *stack) Push(value string) {
- s.top = &element{value, s.top}
- s.size++
-}
-
-func (s *stack) Peek() string {
- if s.size == 0 {
- return ""
- }
- return s.top.value
-}
-
-func (s *stack) Pop() string {
- if s.size > 0 {
- var v string
- v, s.top = s.top.value, s.top.next
- s.size--
- return v
- }
- return ""
-}
-
-// ConvertRTF converts an io.Reader containing RTF data into
-// plain text.
-func ConvertRTF(r io.Reader) (string, error) {
- pr := peekingReader.NewBufReader(r)
-
- var text bytes.Buffer
- var symbolStack stack
- for b, err := pr.ReadByte(); err == nil; b, err = pr.ReadByte() {
- switch b {
- case '\\':
- err := ReadRtfControl(pr, &symbolStack, &text)
- if err != nil {
- return "", err
- }
- case '{', '}':
- case '\n', '\r': // noop
- default:
- text.WriteByte(b)
- }
- }
- return text.String(), nil
-}
-
-func ReadRtfControl(r peekingReader.Reader, s *stack, text *bytes.Buffer) error {
- control, num, err := tokenizeControl(r)
- if err != nil {
- return err
- }
- if control == "*" { // this is an extended control sequence
- err := readUntilClosingBrace(r)
- if err != nil {
- return err
- }
- if last := s.Peek(); last != "" {
- val, err := getParams(r) // last control was interrupted, so finish handling Params
- handleParams(control, val, text)
- return err
- }
- return nil
- }
- if isUnicode, u := getUnicode(control); isUnicode {
- text.WriteString(u)
- return nil
- }
- if control == "" {
- p, err := r.Peek(1)
- if err != nil {
- return err
- }
- if p[0] == '\\' || p[0] == '{' || p[0] == '}' { // this is an escaped character
- text.WriteByte(p[0])
- r.ReadByte()
- return nil
- }
- text.WriteByte('\n')
- return nil
- }
- if control == "binN" {
- return handleBinary(r, control, num)
- }
-
- if symbol, found := convertSymbol(control); found {
- text.WriteString(symbol)
- }
-
- val, err := getParams(r)
- if err != nil {
- return err
- }
- handleParams(control, val, text)
- s.Push(control)
- return nil
-}
-
-func tokenizeControl(r peekingReader.Reader) (string, int, error) {
- var buf bytes.Buffer
- isHex := false
- numStart := -1
- for {
- p, err := r.Peek(1)
- if err != nil {
- return "", -1, err
- }
- b := p[0]
- switch {
- case b == '*' && buf.Len() == 0:
-   r.ReadByte() // consume the '*'
- return "*", -1, nil
- case b == '\'' && buf.Len() == 0:
- isHex = true
- buf.WriteByte(b)
- r.ReadByte()
- case b >= '0' && b <= '9' || b == '-':
- if numStart == -1 {
- numStart = buf.Len()
- } else if numStart == 0 {
- return "", -1, errors.New("Unexpected control sequence. Cannot begin with digit")
- }
- buf.WriteByte(b)
- r.ReadByte() // consume valid digit
- case b >= 'a' && b <= 'z' || b >= 'A' && b <= 'Z':
- if numStart > 0 { // we've already seen alpha character(s) plus digit(s)
- c, num := canonicalize(buf.String(), numStart)
- return c, num, nil
- }
- buf.WriteByte(b)
- r.ReadByte()
- default:
- if isHex {
- return buf.String(), -1, nil
- }
- c, num := canonicalize(buf.String(), numStart)
- return c, num, nil
- }
- }
-}
-
-func canonicalize(control string, numStart int) (string, int) {
- if numStart == -1 || numStart >= len(control) {
- return control, -1
- }
- num, err := strconv.Atoi(control[numStart:])
- if err != nil {
- return control, -1
- }
- return control[:numStart] + "N", num
-}
-
-func getUnicode(control string) (bool, string) {
- if len(control) < 2 || control[0] != '\'' {
- return false, ""
- }
-
- var buf bytes.Buffer
- for i := 1; i < len(control); i++ {
- b := control[i]
- if b >= '0' && b <= '9' || b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F' {
- buf.WriteByte(b)
- } else {
- break
- }
- }
- after := control[buf.Len()+1:]
- num, _ := strconv.ParseInt(buf.String(), 16, 16)
- return true, fmt.Sprintf("%c%s", num, after)
-}
-
-func getParams(r peekingReader.Reader) (string, error) {
- data, err := peekingReader.ReadUntilAny(r, []byte{'\\', '{', '}', '\n', '\r', ';'})
- if err != nil {
- return "", err
- }
- p, err := r.Peek(1)
- if err != nil {
- return "", err
- }
- if p[0] == ';' { // skip next if it is a semicolon
- r.ReadByte()
- }
-
- return string(data), nil
-}
-
-func handleBinary(r peekingReader.Reader, control string, size int) error {
- if control != "binN" { // wrong control type
- return nil
- }
-
- _, err := r.ReadBytes(size)
- if err != nil {
- return err
- }
- return nil
-}
-
-func readUntilClosingBrace(r peekingReader.Reader) error {
- count := 1
- var b byte
- var err error
- for b, err = r.ReadByte(); err == nil; b, err = r.ReadByte() {
- switch b {
- case '{':
- count++
- case '}':
- count--
- }
- if count == 0 {
- return nil
- }
- }
- return err
-}
-
-func handleParams(control, param string, text *bytes.Buffer) {
- if strings.HasPrefix(param, " ") {
- param = param[1:]
- }
- if param == "" {
- return
- }
- switch control {
- // Absolute Position Tabs
- // case "pindtabqc", "pindtabql", "pindtabqr", "pmartabqc", "pmartabql", "pmartabqr", "ptabldot", "ptablmdot", "ptablminus", "ptablnone", "ptabluscore":
-
- // Associated Character Properties
- // case "ab","acaps","acfN","adnN","aexpndN","afN","afsN","ai","alangN","aoutl","ascaps","ashad","astrike","aul","auld","auldb","aulnone","aulw","aupN","dbch","fcsN","hich","loch":
-
- // Bookmarks
- // case "bkmkcolfN","bkmkcollN","bkmkend","bkmkstart":
-
- // Bullets and Numbering
- // case "ilvlN","listtext","pn ","pnacross ","pnaiu","pnaiud","pnaiueo","pnaiueod","pnb ","pnbidia","pnbidib","pncaps ","pncard ","pncfN ","pnchosung","pncnum","pndbnum","pndbnumd","pndbnumk","pndbnuml","pndbnumt","pndec ","pndecd","pnfN ","pnfsN ","pnganada","pngbnum","pngbnumd","pngbnumk","pngbnuml","pnhang ","pni ","pnindentN ","pniroha","pnirohad","pnlcltr ","pnlcrm ","pnlvlblt ","pnlvlbody ","pnlvlcont ","pnlvlN ","pnnumonce ","pnord ","pnordt ","pnprev ","pnqc ","pnql ","pnqr ","pnrestart ","pnscaps ","pnspN ","pnstartN ","pnstrike ","pntext ","pntxta ","pntxtb ","pnucltr ","pnucrm ","pnul ","pnuld ","pnuldash","pnuldashd","pnuldashdd","pnuldb ","pnulhair","pnulnone ","pnulth","pnulw ","pnulwave","pnzodiac","pnzodiacd","pnzodiacl":
-
- // Character Borders and Shading
- // case "chbgbdiag","chbgcross","chbgdcross","chbgdkbdiag","chbgdkcross","chbgdkdcross","chbgdkfdiag","chbgdkhoriz","chbgdkvert","chbgfdiag","chbghoriz","chbgvert","chbrdr","chcbpatN","chcfpatN","chshdngN":
-
- // Character Revision Mark Properties
- // case "crauthN","crdateN","deleted","mvauthN ","mvdateN ","mvf","mvt","revauthdelN","revauthN ","revdttmdelN","revdttmN ","revised":
-
- // Character Set
- // case "ansi","ansicpgN","fbidis","mac","pc","pca","impr","striked1":
-
- // Code Page Support
- // case "cpgN":
-
- // Color Scheme Mapping
- // case "colorschememapping":
-
- // Color Table
- // case "blueN","caccentfive","caccentfour","caccentone","caccentsix","caccentthree","caccenttwo","cbackgroundone","cbackgroundtwo","cfollowedhyperlink","chyperlink","cmaindarkone","cmaindarktwo","cmainlightone","cmainlighttwo","colortbl","cshadeN","ctextone","ctexttwo","ctintN","greenN","redN":
-
- // Comments (Annotations)
- // case "annotation","atnauthor","atndate ","atnicn","atnid","atnparent","atnref ","atntime","atrfend ","atrfstart ":
-
- // Control Words Introduced by Other Microsoft Products
- // case "disabled","htmlbase ","htmlrtf","htmltag","mhtmltag","protect","pwdN","urtfN":
-
- // Custom XML Data Properties
- // case "datastore":
-
- // Custom XML Tags
- // case "xmlattr","xmlattrname","xmlattrnsN","xmlattrvalue","xmlclose","xmlname","xmlnstbl","xmlopen","xmlsdttcell","xmlsdttpara","xmlsdttregular","xmlsdttrow","xmlsdttunknown","xmlnsN":
-
- // Default Fonts
- // case "adeffN","adeflangN","deffN","deflangfeN","deflangN","stshfbiN","stshfdbchN","stshfhichN","stshflochN":
-
- // Default Properties
- // case "defchp","defpap":
-
- // Document Formatting Properties
- // case "aenddoc","aendnotes","afelev","aftnbj","aftncn","aftnnalc","aftnnar","aftnnauc","aftnnchi","aftnnchosung","aftnncnum","aftnndbar","aftnndbnum","aftnndbnumd","aftnndbnumk","aftnndbnumt","aftnnganada","aftnngbnum","aftnngbnumd","aftnngbnumk","aftnngbnuml","aftnnrlc ","aftnnruc ","aftnnzodiac","aftnnzodiacd","aftnnzodiacl","aftnrestart ","aftnrstcont ","aftnsep ","aftnsepc ","aftnstartN","aftntj ","allowfieldendsel","allprot ","alntblind","annotprot ","ApplyBrkRules","asianbrkrule","autofmtoverride","background","bdbfhdr","bdrrlswsix","bookfold","bookfoldrev","bookfoldsheetsN","brdrartN","brkfrm ","cachedcolbal","ctsN","cvmme ","defformat","deftabN","dghoriginN","dghshowN","dghspaceN","dgmargin","dgsnap","dgvoriginN","dgvshowN","dgvspaceN","dntblnsbdb","doctemp","doctypeN","donotembedlingdataN","donotembedsysfontN","donotshowcomments","donotshowinsdel","donotshowmarkup","donotshowprops","enddoc","endnotes","enforceprotN","expshrtn","facingp","fchars","felnbrelev","fetN ","forceupgrade","formdisp ","formprot ","formshade ","fracwidth","fromhtmlN","fromtext","ftnalt ","ftnbj","ftncn","ftnlytwnine","ftnnalc ","ftnnar ","ftnnauc ","ftnnchi ","ftnnchosung","ftnncnum","ftnndbar","ftnndbnum","ftnndbnumd","ftnndbnumk","ftnndbnumt","ftnnganada","ftnngbnum","ftnngbnumd","ftnngbnumk","ftnngbnuml","ftnnrlc ","ftnnruc ","ftnnzodiac","ftnnzodiacd","ftnnzodiacl","ftnrestart","ftnrstcont ","ftnrstpg ","ftnsep","ftnsepc","ftnstartN","ftntj","grfdoceventsN","gutterN","gutterprl","horzdoc","htmautsp","hwelev2007","hyphauto ","hyphcaps ","hyphconsecN ","hyphhotzN","ignoremixedcontentN","ilfomacatclnupN","indrlsweleven","jcompress","jexpand","jsksu","krnprsnet","ksulangN","landscape","lchars","linestartN","linkstyles ","lnbrkrule","lnongrid","ltrdoc","lytcalctblwd","lytexcttp","lytprtmet","lyttblrtgr","makebackup","margbN","marglN","margmirror","margrN","margtN","msmcap","muser","newtblstyruls","nextfile","noafcnsttbl","nobrkwrptbl","nocolbal ","nocompatoptions","nocxsptable","noextrasprl ","nofeaturethrottle","nogrowautofit","noindnmbrts","nojkernpunct","nolead","nolnhtadjtbl","nospaceforul","notabind ","notbrkcnstfrctbl","notcvasp","notvatxbx","nouicompat","noultrlspc","noxlattoyen","ogutterN","oldas","oldlinewrap","otblrul ","paperhN","paperwN","pgbrdrb","pgbrdrfoot","pgbrdrhead","pgbrdrl","pgbrdroptN","pgbrdrr","pgbrdrsnap","pgbrdrt","pgnstartN","prcolbl ","printdata ","private","protlevelN","psover","pszN ","readonlyrecommended","readprot","relyonvmlN","remdttm","rempersonalinfo","revbarN","revisions","revpropN","revprot ","rtldoc","rtlgutter","saveinvalidxml","saveprevpict","showplaceholdtextN","showxmlerrorsN","snaptogridincell","spltpgpar","splytwnine","sprsbsp","sprslnsp","sprsspbf ","sprstsm","sprstsp ","stylelock","stylelockbackcomp","stylelockenforced","stylelockqfset","stylelocktheme","stylesortmethodN","subfontbysize","swpbdr ","template","themelangcsN","themelangfeN","themelangN","toplinepunct","trackformattingN","trackmovesN","transmf ","truncatefontheight","truncex","tsd","twoonone","useltbaln","usenormstyforlist","usexform","utinl","validatexmlN","vertdoc","viewbkspN","viewkindN","viewnobound","viewscaleN","viewzkN","wgrffmtfilter","widowctrl","windowcaption","wpjst","wpsp","wraptrsp ","writereservation","writereservhash","wrppunct","xform":
-
- // Document Variables
- // case "docvar":
-
- // Drawing Object Properties
- // case "hl","hlfr","hlloc","hlsrc","hrule","hsv":
-
- // Drawing Objects
- // case "do ","dobxcolumn ","dobxmargin ","dobxpage ","dobymargin ","dobypage ","dobypara ","dodhgtN ","dolock ","dpaendhol ","dpaendlN ","dpaendsol ","dpaendwN ","dparc ","dparcflipx ","dparcflipy ","dpastarthol ","dpastartlN ","dpastartsol ","dpastartwN ","dpcallout ","dpcoaccent ","dpcoaN ","dpcobestfit ","dpcoborder ","dpcodabs","dpcodbottom ","dpcodcenter ","dpcodescentN","dpcodtop ","dpcolengthN ","dpcominusx ","dpcominusy ","dpcooffsetN ","dpcosmarta ","dpcotdouble ","dpcotright ","dpcotsingle ","dpcottriple ","dpcountN ","dpellipse ","dpendgroup ","dpfillbgcbN ","dpfillbgcgN ","dpfillbgcrN ","dpfillbggrayN ","dpfillbgpal ","dpfillfgcbN ","dpfillfgcgN ","dpfillfgcrN ","dpfillfggrayN ","dpfillfgpal ","dpfillpatN ","dpgroup ","dpline ","dplinecobN ","dplinecogN ","dplinecorN ","dplinedado ","dplinedadodo ","dplinedash ","dplinedot ","dplinegrayN ","dplinehollow ","dplinepal ","dplinesolid ","dplinewN ","dppolycountN ","dppolygon ","dppolyline ","dpptxN ","dpptyN ","dprect ","dproundr ","dpshadow ","dpshadxN ","dpshadyN ","dptxbtlr","dptxbx ","dptxbxmarN ","dptxbxtext ","dptxlrtb","dptxlrtbv","dptxtbrl","dptxtbrlv","dpxN ","dpxsizeN ","dpyN ","dpysizeN ":
-
- // East Asian Control Words
- // case "cgridN","g","gcwN","gridtbl","nosectexpand","ulhair":
-
- // Fields
- // case "datafield ","date","field","fldalt ","flddirty","fldedit","fldinst","fldlock","fldpriv","fldrslt","fldtype","time","wpeqn":
- case "fldrslt":
- text.WriteString(param)
-
- // File Table
- // case "fidN ","file ","filetbl ","fnetwork ","fnonfilesys","fosnumN ","frelativeN ","fvaliddos ","fvalidhpfs ","fvalidmac ","fvalidntfs ":
-
- // Font (Character) Formatting Properties
- case "acccircle", "acccomma", "accdot", "accnone", "accunderdot", "animtextN", "b", "caps", "cbN", "cchsN ", "cfN", "charscalexN", "csN", "dnN", "embo", "expndN", "expndtwN ", "fittextN", "fN", "fsN", "i", "kerningN ", "langfeN", "langfenpN", "langN", "langnpN", "ltrch", "noproof", "nosupersub ", "outl", "plain", "rtlch", "scaps", "shad", "strike", "sub ", "super ", "ul", "ulcN", "uld", "uldash", "uldashd", "uldashdd", "uldb", "ulhwave", "ulldash", "ulnone", "ulth", "ulthd", "ulthdash", "ulthdashd", "ulthdashdd", "ulthldash", "ululdbwave", "ulw", "ulwave", "upN", "v", "webhidden":
- text.WriteString(param)
-
- // Font Family
- // case "fjgothic","fjminchou","jis","falt ","fbiasN","fbidi","fcharsetN","fdecor","fetch","fmodern","fname","fnil","fontemb","fontfile","fonttbl","fprqN ","froman","fscript","fswiss","ftech","ftnil","fttruetype","panose":
-
- // Footnotes
- // case "footnote":
-
- // Form Fields
- // case "ffdefresN","ffdeftext","ffentrymcr","ffexitmcr","ffformat","ffhaslistboxN","ffhelptext","ffhpsN","ffl","ffmaxlenN","ffname","ffownhelpN","ffownstatN","ffprotN","ffrecalcN","ffresN","ffsizeN","ffstattext","fftypeN","fftypetxtN","formfield":
-
- // Generator
- // case "generator":
-
- // Headers and Footers
- // case "footer","footerf","footerl","footerr","header","headerf","headerl","headerr":
-
- // Highlighting
- // case "highlightN":
-
- // Hyphenation Information
- // case "chhresN","hresN":
-
- // Index Entries
- // case "bxe","ixe","pxe","rxe","txe","xe","xefN","yxe":
-
- // Information Group
- // case "author","buptim","category","comment","company","creatim","doccomm","dyN","edminsN","hlinkbase","hrN","idN","info","keywords","linkval","manager","minN","moN","nofcharsN","nofcharswsN","nofpagesN","nofwordsN","operator","printim","propname","proptypeN","revtim","secN","staticval","subject","title","userprops","vernN","versionN","yrN":
-
- // List Levels
- // case "lvltentative":
-
- // List Table
- // case "jclisttab","levelfollowN","levelindentN","leveljcN","leveljcnN","levellegalN","levelnfcN","levelnfcnN","levelnorestartN","levelnumbers","leveloldN","levelpictureN","levelpicturenosize","levelprevN","levelprevspaceN","levelspaceN","levelstartatN","leveltemplateidN","leveltext","lfolevel","list","listhybrid","listidN","listlevel","listname","listoverride","listoverridecountN","listoverrideformatN","listoverridestartat","listoverridetable","listpicture","listrestarthdnN","listsimpleN","liststyleidN","liststylename","listtable","listtemplateidN","lsN":
-
- // Macintosh Edition Manager Publisher Objects
- // case "bkmkpub","pubauto":
-
- // Mail Merge
- // case "mailmerge","mmaddfieldname","mmattach","mmblanklines","mmconnectstr","mmconnectstrdata","mmdatasource","mmdatatypeaccess","mmdatatypeexcel","mmdatatypefile","mmdatatypeodbc","mmdatatypeodso","mmdatatypeqt","mmdefaultsql","mmdestemail","mmdestfax","mmdestnewdoc 2 007","mmdestprinter","mmerrorsN","mmfttypeaddress","mmfttypebarcode","mmfttypedbcolumn","mmfttypemapped","mmfttypenull","mmfttypesalutation","mmheadersource","mmjdsotypeN","mmlinktoquery","mmmailsubject","mmmaintypecatalog","mmmaintypeemail","mmmaintypeenvelopes","mmmaintypefax","mmmaintypelabels","mmmaintypeletters","mmodso","mmodsoactiveN","mmodsocoldelimN","mmodsocolumnN","mmodsodynaddrN","mmodsofhdrN","mmodsofilter","mmodsofldmpdata","mmodsofmcolumnN","mmodsohashN","mmodsolidN","mmodsomappedname","mmodsoname","mmodsorecipdata","mmodsosort","mmodsosrc ","mmodsotable","mmodsoudl","mmodsoudldata 200 7","mmodsouniquetag","mmquery","mmreccurN","mmshowdata":
-
- // Math
- // case "macc","maccPr","maln","malnScr","margPr","margSzN","mbar","mbarPr","mbaseJc","mbegChr","mborderBox","mborderBoxPr","mbox","mboxPr","mbrkBinN","mbrkBinSubN","mbrkN","mcGpN","mcGpRuleN","mchr","mcount","mcSpN","mctrlPr","md","mdefJcN","mdeg","mdegHide","mden","mdiff","mdiffStyN","mdispdefN","mdPr","me","mendChr","meqArr","meqArrPr","mf","mfName","mfPr","mfunc","mfuncPr","mgroupChr","mgroupChrPr","mgrow","mhideBot","mhideLeft","mhideRight","mhideTop","minterSpN","mintLimN","mintraSpN","mjcN","mlim","mlimloc","mlimlow","mlimlowPr","mlimupp","mlimuppPr","mlit","mlMarginN","mm","mmath","mmathFontN","mmathPict","mmathPr","mmaxdist","mmc","mmcJc","mmcPr","mmcs","mmPr","mmr","mnary","mnaryLimN","mnaryPr","mnoBreak","mnor","mnum","mobjDist","moMath","moMathPara","moMathParaPr","mopEmu","mphant","mphantPr","mplcHide","mpos","mpostSpN","mpreSpN","mr","mrad","mradPr","mrMarginN","mrPr","mrSpN","mrSpRuleN","mscrN","msepChr","mshow","mshp","msmallFracN","msPre","msPrePr","msSub","msSubPr","msSubSup","msSubSupPr","msSup","msSupPr","mstrikeBLTR","mstrikeH","mstrikeTLBR","mstrikeV","mstyN","msub","msubHide","msup","msupHide","mtransp","mtype","mvertJc","mwrapIndentN","mwrapRightN","mzeroAsc","mzeroDesc","mzeroWid":
-
- // Microsoft Office Outlook
- // case "ebcstart","ebcend":
-
- // Move Bookmarks
- // case "mvfmf","mvfml","mvtof","mvtol":
-
- // New Asia Control Words Created by Word
- // case "horzvertN","twoinoneN":
-
- // Objects
- // case "linkself","objalias","objalignN","objattph","objautlink","objclass","objcropbN","objcroplN","objcroprN","objcroptN","objdata","object","objemb","objhN","objhtml","objicemb","objlink","objlock","objname","objocx","objpub","objscalexN","objscaleyN","objsect","objsetsize","objsub","objtime","objtransyN","objupdate ","objwN","oleclsid","result","rsltbmp","rslthtml","rsltmerge","rsltpict","rsltrtf","rslttxt":
-
- // Paragraph Borders
- // case "box","brdrb","brdrbar","brdrbtw","brdrcfN","brdrdash ","brdrdashd","brdrdashdd","brdrdashdot","brdrdashdotdot","brdrdashdotstr","brdrdashsm","brdrdb","brdrdot","brdremboss","brdrengrave","brdrframe","brdrhair","brdrinset","brdrl","brdrnil","brdrnone","brdroutset","brdrr","brdrs","brdrsh","brdrt","brdrtbl","brdrth","brdrthtnlg","brdrthtnmg","brdrthtnsg","brdrtnthlg","brdrtnthmg","brdrtnthsg","brdrtnthtnlg","brdrtnthtnmg","brdrtnthtnsg","brdrtriple","brdrwavy","brdrwavydb","brdrwN","brspN":
-
- // Paragraph Formatting Properties
- case "aspalpha", "aspnum", "collapsed", "contextualspace", "cufiN", "culiN", "curiN", "faauto", "facenter", "fafixed", "fahang", "faroman", "favar", "fiN", "hyphpar ", "indmirror", "intbl", "itapN", "keep", "keepn", "levelN", "liN", "linN", "lisaN", "lisbN", "ltrpar", "nocwrap", "noline", "nooverflow", "nosnaplinegrid", "nowidctlpar ", "nowwrap", "outlinelevelN ", "pagebb", "pard", "prauthN", "prdateN", "qc", "qd", "qj", "qkN", "ql", "qr", "qt", "riN", "rinN", "rtlpar", "saautoN", "saN", "sbautoN", "sbN", "sbys", "slmultN", "slN", "sN", "spv", "subdocumentN ", "tscbandhorzeven", "tscbandhorzodd", "tscbandverteven", "tscbandvertodd", "tscfirstcol", "tscfirstrow", "tsclastcol", "tsclastrow", "tscnecell", "tscnwcell", "tscsecell", "tscswcell", "txbxtwalways", "txbxtwfirst", "txbxtwfirstlast", "txbxtwlast", "txbxtwno", "widctlpar", "ytsN":
- text.WriteString(param)
-
- // Paragraph Group Properties
- // case "pgp","pgptbl","ipgpN":
-
- // Paragraph Revision Mark Properties
- // case "dfrauthN","dfrdateN","dfrstart","dfrstop","dfrxst":
-
- // Paragraph Shading
- // case "bgbdiag","bgcross","bgdcross","bgdkbdiag","bgdkcross","bgdkdcross","bgdkfdiag","bgdkhoriz","bgdkvert","bgfdiag","bghoriz","bgvert","cbpatN","cfpatN","shadingN":
-
- // Pictures
- // case "binN","bliptagN","blipuid","blipupiN","defshp","dibitmapN","emfblip","jpegblip","macpict","nonshppict","picbmp ","picbppN ","piccropbN","piccroplN","piccroprN","piccroptN","pichgoalN","pichN","picprop","picscaled","picscalexN","picscaleyN","pict","picwgoalN","picwN","pmmetafileN","pngblip","shppict","wbitmapN","wbmbitspixelN","wbmplanesN","wbmwidthbyteN","wmetafileN":
-
- // Positioned Objects and Frames
- // case "abshN","abslock","absnoovrlpN","abswN","dfrmtxtxN","dfrmtxtyN","dropcapliN ","dropcaptN ","dxfrtextN","frmtxbtlr","frmtxlrtb","frmtxlrtbv","frmtxtbrl","frmtxtbrlv","nowrap","overlay","phcol","phmrg","phpg","posnegxN ","posnegyN ","posxc","posxi","posxl","posxN","posxo","posxr","posyb","posyc","posyil","posyin","posyN","posyout","posyt","pvmrg","pvpara","pvpg","wraparound","wrapdefault","wrapthrough","wraptight":
-
- // Protection Exceptions
- // case "protend","protstart":
-
- // Quick Styles
- // case "noqfpromote":
-
- // Read-Only Password Protection
- // case "password","passwordhash":
-
- // Revision Marks for Paragraph Numbers and ListNum Fields
- // case "pnrauthN","pnrdateN","pnrnfcN","pnrnot","pnrpnbrN","pnrrgbN","pnrstartN","pnrstopN","pnrxstN":
-
- // RTF Version
- // case "rtfN":
-
- // Section Formatting Properties
- case "adjustright", "binfsxnN", "binsxnN", "colnoN ", "colsN", "colsrN ", "colsxN", "colwN ", "dsN", "endnhere", "footeryN", "guttersxnN", "headeryN", "horzsect", "linebetcol", "linecont", "linemodN", "lineppage", "linerestart", "linestartsN", "linexN", "lndscpsxn", "ltrsect", "margbsxnN", "marglsxnN", "margmirsxn", "margrsxnN", "margtsxnN", "pghsxnN", "pgnbidia", "pgnbidib", "pgnchosung", "pgncnum", "pgncont", "pgndbnum", "pgndbnumd", "pgndbnumk", "pgndbnumt", "pgndec", "pgndecd", "pgnganada", "pgngbnum", "pgngbnumd", "pgngbnumk", "pgngbnuml", "pgnhindia", "pgnhindib", "pgnhindic", "pgnhindid", "pgnhnN ", "pgnhnsc ", "pgnhnsh ", "pgnhnsm ", "pgnhnsn ", "pgnhnsp ", "pgnid", "pgnlcltr", "pgnlcrm", "pgnrestart", "pgnstartsN", "pgnthaia", "pgnthaib", "pgnthaic", "pgnucltr", "pgnucrm", "pgnvieta", "pgnxN", "pgnyN", "pgnzodiac", "pgnzodiacd", "pgnzodiacl", "pgwsxnN", "pnseclvlN", "rtlsect", "saftnnalc", "saftnnar", "saftnnauc", "saftnnchi", "saftnnchosung", "saftnncnum", "saftnndbar", "saftnndbnum", "saftnndbnumd", "saftnndbnumk", "saftnndbnumt", "saftnnganada", "saftnngbnum", "saftnngbnumd", "saftnngbnumk", "saftnngbnuml", "saftnnrlc", "saftnnruc", "saftnnzodiac", "saftnnzodiacd", "saftnnzodiacl", "saftnrestart", "saftnrstcont", "saftnstartN", "sbkcol", "sbkeven", "sbknone", "sbkodd", "sbkpage", "sectd", "sectdefaultcl", "sectexpandN", "sectlinegridN", "sectspecifycl", "sectspecifygenN", "sectspecifyl", "sectunlocked", "sftnbj", "sftnnalc", "sftnnar", "sftnnauc", "sftnnchi", "sftnnchosung", "sftnncnum", "sftnndbar", "sftnndbnum", "sftnndbnumd", "sftnndbnumk", "sftnndbnumt", "sftnnganada", "sftnngbnum", "sftnngbnumd", "sftnngbnumk", "sftnngbnuml", "sftnnrlc", "sftnnruc", "sftnnzodiac", "sftnnzodiacd", "sftnnzodiacl", "sftnrestart", "sftnrstcont", "sftnrstpg", "sftnstartN", "sftntj", "srauthN", "srdateN", "titlepg", "vertal", "vertalb", "vertalc", "vertalj", "vertalt", "vertsect":
- text.WriteString(param)
-
- // Section Text
- case "stextflowN":
- text.WriteString(param)
-
- // SmartTag Data
- // case "factoidname":
-
- // Special Characters
- case "-", ":", "_", "{", "|", "}", "~", "bullet", "chatn", "chdate", "chdpa", "chdpl", "chftn", "chftnsep", "chftnsepc", "chpgn", "chtime", "column", "emdash", "emspace ", "endash", "enspace ", "lbrN", "ldblquote", "line", "lquote", "ltrmark", "page", "par", "qmspace", "rdblquote", "row", "rquote", "rtlmark", "sect", "sectnum", "softcol ", "softlheightN ", "softline ", "softpage ", "tab", "zwbo", "zwj", "zwnbo", "zwnj":
- text.WriteString(param)
-
- // Style and Formatting Restrictions
- // case "latentstyles","lsdlockeddefN","lsdlockedexcept","lsdlockedN","lsdprioritydefN","lsdpriorityN","lsdqformatdefN","lsdqformatN","lsdsemihiddendefN","lsdsemihiddenN","lsdstimaxN","lsdunhideuseddefN","lsdunhideusedN":
-
- // Style Sheet
- // case "additive","alt","ctrl","fnN","keycode","sautoupd","sbasedonN","scompose","shidden","shift","slinkN","slocked","snextN","spersonal","spriorityN","sqformat","sreply","ssemihiddenN","stylesheet","styrsidN","sunhideusedN","tsN","tsrowd":
-
- // Table Definitions
- case "cell", "cellxN", "clbgbdiag", "clbgcross", "clbgdcross", "clbgdkbdiag", "clbgdkcross", "clbgdkdcross", "clbgdkfdiag", "clbgdkhor", "clbgdkvert", "clbgfdiag", "clbghoriz", "clbgvert", "clbrdrb", "clbrdrl", "clbrdrr", "clbrdrt", "clcbpatN", "clcbpatrawN", "clcfpatN", "clcfpatrawN", "cldel2007", "cldelauthN", "cldeldttmN", "cldgll", "cldglu", "clFitText", "clftsWidthN", "clhidemark", "clins", "clinsauthN", "clinsdttmN", "clmgf", "clmrg", "clmrgd", "clmrgdauthN", "clmrgddttmN", "clmrgdr", "clNoWrap", "clpadbN", "clpadfbN", "clpadflN", "clpadfrN", "clpadftN", "clpadlN", "clpadrN", "clpadtN", "clshdngN", "clshdngrawN", "clshdrawnil", "clspbN", "clspfbN", "clspflN", "clspfrN", "clspftN", "clsplit", "clsplitr", "clsplN", "clsprN", "clsptN", "cltxbtlr", "cltxlrtb", "cltxlrtbv", "cltxtbrl", "cltxtbrlv", "clvertalb", "clvertalc", "clvertalt", "clvmgf", "clvmrg", "clwWidthN", "irowbandN", "irowN", "lastrow", "ltrrow", "nestcell", "nestrow", "nesttableprops", "nonesttables", "rawclbgbdiag", "rawclbgcross", "rawclbgdcross", "rawclbgdkbdiag", "rawclbgdkcross", "rawclbgdkdcross", "rawclbgdkfdiag", "rawclbgdkhor", "rawclbgdkvert", "rawclbgfdiag", "rawclbghoriz", "rawclbgvert", "rtlrow", "tabsnoovrlp", "taprtl", "tblindN", "tblindtypeN", "tbllkbestfit", "tbllkborder", "tbllkcolor", "tbllkfont", "tbllkhdrcols", "tbllkhdrrows", "tbllklastcol", "tbllklastrow", "tbllknocolband", "tbllknorowband", "tbllkshading", "tcelld", "tdfrmtxtBottomN", "tdfrmtxtLeftN", "tdfrmtxtRightN", "tdfrmtxtTopN", "tphcol", "tphmrg", "tphpg", "tposnegxN", "tposnegyN", "tposxc", "tposxi", "tposxl", "tposxN", "tposxo", "tposxr", "tposyb", "tposyc", "tposyil", "tposyin", "tposyN", "tposyout", "tposyt", "tpvmrg", "tpvpara", "tpvpg", "trauthN", "trautofitN", "trbgbdiag", "trbgcross", "trbgdcross", "trbgdkbdiag", "trbgdkcross", "trbgdkdcross", "trbgdkfdiag", "trbgdkhor", "trbgdkvert", "trbgfdiag", "trbghoriz", "trbgvert", "trbrdrb ", "trbrdrh ", "trbrdrl ", "trbrdrr ", "trbrdrt ", "trbrdrv ", "trcbpatN", "trcfpatN", "trdateN", "trftsWidthAN", "trftsWidthBN", "trftsWidthN", "trgaphN", "trhdr ", "trkeep ", "trkeepfollow", "trleftN", "trowd", "trpaddbN", "trpaddfbN", "trpaddflN", "trpaddfrN", "trpaddftN", "trpaddlN", "trpaddrN", "trpaddtN", "trpadobN", "trpadofbN", "trpadoflN", "trpadofrN", "trpadoftN", "trpadolN", "trpadorN", "trpadotN", "trpatN", "trqc", "trql", "trqr", "trrhN", "trshdngN", "trspdbN", "trspdfbN", "trspdflN", "trspdfrN", "trspdftN", "trspdlN", "trspdrN", "trspdtN", "trspobN", "trspofbN", "trspoflN", "trspofrN", "trspoftN", "trspolN", "trsporN", "trspotN", "trwWidthAN", "trwWidthBN", "trwWidthN":
- text.WriteString(param)
-
- // Table of Contents Entries
- case "tc", "tcfN", "tclN", "tcn ":
- text.WriteString(param)
-
- // Table Styles
- // case "tsbgbdiag","tsbgcross","tsbgdcross","tsbgdkbdiag","tsbgdkcross","tsbgdkdcross","tsbgdkfdiag","tsbgdkhor","tsbgdkvert","tsbgfdiag","tsbghoriz","tsbgvert","tsbrdrb","tsbrdrdgl","tsbrdrdgr","tsbrdrh","tsbrdrl","tsbrdrr","tsbrdrr","tsbrdrt","tsbrdrv","tscbandshN","tscbandsvN","tscellcbpatN","tscellcfpatN","tscellpaddbN","tscellpaddfbN","tscellpaddflN","tscellpaddfrN","tscellpaddftN","tscellpaddlN","tscellpaddrN","tscellpaddtN","tscellpctN","tscellwidthftsN","tscellwidthN","tsnowrap","tsvertalb","tsvertalc","tsvertalt":
-
- // Tabs
- case "tbN", "tldot", "tleq", "tlhyph", "tlmdot", "tlth", "tlul", "tqc", "tqdec", "tqr", "txN":
- text.WriteString(param)
-
- // Theme Data
- // case "themedata":
-
- // Theme Font Information
- // case "fbimajor","fbiminor","fdbmajor","fdbminor","fhimajor","fhiminor","flomajor","flominor":
-
- // Track Changes
- // case "revtbl ":
-
- // Track Changes (Revision Marks)
- // case "charrsidN","delrsidN","insrsidN","oldcprops","oldpprops","oldsprops","oldtprops","pararsidN","rsidN","rsidrootN","rsidtbl","sectrsidN","tblrsidN":
-
- // Unicode RTF
- // case "ucN","ud","uN","upr":
-
- // User Protection Information
- // case "protusertbl":
-
- // Word through Word RTF for Drawing Objects (Shapes)
- // case "shp","shpbottomN","shpbxcolumn","shpbxignore","shpbxmargin","shpbxpage","shpbyignore","shpbymargin","shpbypage","shpbypara","shpfblwtxtN","shpfhdrN","shpgrp","shpinst","shpleftN","shplidN","shplockanchor","shprightN","shprslt","shptopN","shptxt","shpwrkN","shpwrN","shpzN","sn","sp","sv","svb":
- default:
- }
-}
-
-func convertSymbol(symbol string) (string, bool) {
- switch symbol {
- case "bullet":
- return "*", true
- case "chdate", "chdpa", "chdpl":
-  return time.Now().Format("2006-01-02"), true // Go reference layout for YYYY-MM-DD
- case "chtime":
-  return time.Now().Format("3:04 pm"), true // Go reference layout for h:mm am/pm
- case "emdash", "endash":
- return "-", true
- case "lquote", "rquote":
- return "'", true
- case "ldblquote", "rdblquote":
- return "\"", true
- case "line", "lbrN":
- return "\n", true
- case "cell", "column", "emspace", "enspace", "qmspace", "nestcell", "nestrow", "page", "par", "row", "sect", "tab":
- return " ", true
- case "|", "~", "-", "_", ":":
- return symbol, true
- case "chatn", "chftn", "chftnsep", "chftnsepc", "chpgn", "sectnum", "ltrmark", "rtlmark", "zwbo", "zwj", "zwnbo", "zwnj", "softcol",
- "softline", "softpage":
- return "", true
- default:
- return "", false
- }
-}
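
A small sketch of the control-word canonicalization above: a trailing numeric parameter is folded into the placeholder suffix "N" and returned separately, which is how \fs24 ends up matched by the "fsN" cases in handleParams (same package assumed):

```go
package convert

import "fmt"

func exampleCanonicalize() {
	control, num := canonicalize("fs24", 2) // digits start at offset 2
	fmt.Println(control, num)               // "fsN 24"

	control, num = canonicalize("pard", -1) // no numeric suffix
	fmt.Println(control, num)               // "pard -1"
}
```
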
diff --git a/godo/ai/convert/txt.go b/godo/ai/convert/txt.go
deleted file mode 100644
index adcc424..0000000
--- a/godo/ai/convert/txt.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "io"
-)
-
-func ConvertTxt(r io.Reader) (string, error) {
-
- b, err := io.ReadAll(r)
- if err != nil {
- return "", err
- }
- return string(b), nil
-}
diff --git a/godo/ai/convert/url.go b/godo/ai/convert/url.go
deleted file mode 100644
index 6780fe3..0000000
--- a/godo/ai/convert/url.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "fmt"
- "io"
- "net/http"
-
- "jaytaylor.com/html2text"
-)
-
-func resErr(err error) Res {
- return Res{
- Status: 201,
-  Data:   fmt.Sprintf("error converting url: %v", err),
- }
-}
-func ConvertHttp(url string) Res {
- resp, err := http.Get(url)
- if err != nil {
- return resErr(err)
- }
- defer resp.Body.Close()
-
- body, errRead := io.ReadAll(resp.Body)
- if errRead != nil {
- return resErr(errRead)
- }
- text, err := html2text.FromString(string(body), html2text.Options{PrettyTables: false})
- if err != nil {
- return resErr(err)
- }
- return Res{
- Status: 0,
- Data: text,
- }
-}
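
Usage sketch for ConvertHttp, which fetches a page and reduces the HTML to text through html2text (the URL matches the one already used in main_test.go):

```go
package convert

import "fmt"

func exampleConvertHttp() {
	res := ConvertHttp("https://www.baidu.com")
	if res.Status != 0 {
		fmt.Println("fetch failed:", res.Data)
		return
	}
	fmt.Println(res.Data) // plain-text rendering of the page
}
```
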
diff --git a/godo/ai/convert/xlsx.go b/godo/ai/convert/xlsx.go
deleted file mode 100644
index c270a2f..0000000
--- a/godo/ai/convert/xlsx.go
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
-
-import (
- "io"
- "strings"
-
- "godo/ai/convert/libs"
-
- "github.com/pbnjay/grate"
- _ "github.com/pbnjay/grate/simple" // tsv and csv support
- _ "github.com/pbnjay/grate/xls"
- _ "github.com/pbnjay/grate/xlsx"
-)
-
-// ConvertXlsx converts a spreadsheet to plain text, first row by row and then column by column.
-func ConvertXlsx(r io.Reader) (string, error) {
- absFileFrom, tmpfromfile, err := libs.GetTempFile(r, "prefix-xlsx-from")
- if err != nil {
- return "", err
- }
- textByRow := ""
- textByColumn := ""
-
- wb, err := grate.Open(absFileFrom) // open the file; ignoring this error would panic below on a bad file
- if err != nil {
-  return "", err
- }
- sheets, _ := wb.List() // list available sheets
-
- // holds the contents of each column
- columns := make([][]string, 0)
-
- for _, s := range sheets { // enumerate each sheet name
- sheet, _ := wb.Get(s) // open the sheet
- maxColumns := 0
- for sheet.Next() { // enumerate each row of data
- row := sheet.Strings() // get the row's content as []string
-
-   // track the widest row seen so far
- if len(row) > maxColumns {
- maxColumns = len(row)
- }
-
-   // skip empty rows
- if len(row) == 0 {
- continue
- }
-
- textByRow += strings.Join(row, "\t") + "\n"
-
-   // grow the column slices without discarding cells collected from earlier rows
-   for len(columns) < maxColumns {
-    columns = append(columns, nil)
-   }
-
-   // append each cell to its column slice
- for i, cell := range row {
- columns[i] = append(columns[i], cell)
- }
- }
- }
-
- // join the contents of each column
- for _, col := range columns {
- textByColumn += strings.Join(col, "\n") + "\n"
- }
-
- wb.Close()
- libs.CloseTempFile(tmpfromfile)
- return textByRow + "\n\n" + textByColumn, nil
-}
diff --git a/godo/ai/server/chat.go b/godo/ai/server/chat.go
index 65aebf9..da2504a 100644
--- a/godo/ai/server/chat.go
+++ b/godo/ai/server/chat.go
@@ -3,7 +3,6 @@ package server
import (
"encoding/json"
"godo/libs"
- "log"
"net/http"
)
@@ -17,8 +16,8 @@ func ChatHandler(w http.ResponseWriter, r *http.Request) {
return
}
headers, url, err := GetHeadersAndUrl(req, "chat")
- log.Printf("url: %s", url)
- log.Printf("headers: %v", headers)
+ // log.Printf("url: %s", url)
+ // log.Printf("headers: %v", headers)
if err != nil {
libs.ErrorMsg(w, err.Error())
return
diff --git a/godo/ai/server/llms.go b/godo/ai/server/llms.go
index ce94137..3b895e1 100644
--- a/godo/ai/server/llms.go
+++ b/godo/ai/server/llms.go
@@ -69,6 +69,10 @@ func GetHeadersAndUrl(req map[string]interface{}, chattype string) (map[string]s
typeUrl = "/images/generations"
}
+ } else if chattype == "text2voice" {
+
+ } else if chattype == "voice2text" {
+
}
return headers, url + typeUrl, nil
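
The new text2voice and voice2text branches are left empty in this change. If they are later wired to an OpenAI-style provider, they would presumably just select a typeUrl the way the existing image branch does; the routes below are assumptions for illustration only, not part of this diff:

```go
// audioTypeURL sketches the sub-path selection for audio chat types.
// The concrete routes are assumed OpenAI-compatible endpoints.
func audioTypeURL(chattype string) string {
	switch chattype {
	case "text2voice":
		return "/audio/speech" // assumed text-to-speech route
	case "voice2text":
		return "/audio/transcriptions" // assumed transcription route
	default:
		return ""
	}
}
```
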
diff --git a/godo/cmd/main.go b/godo/cmd/main.go
index 98be2fa..52505d1 100644
--- a/godo/cmd/main.go
+++ b/godo/cmd/main.go
@@ -47,11 +47,6 @@ func OsStart() {
log.Fatalf("InitOsSystem error: %v", err)
return
}
- err = deps.InitDir()
- if err != nil {
- log.Fatalf("Init Dir error: %v", err)
- return
- }
webdav.InitWebdav()
router := mux.NewRouter()
router.Use(recoverMiddleware)
diff --git a/godo/deps/darwin.go b/godo/deps/darwin.go
deleted file mode 100644
index c15bdc7..0000000
--- a/godo/deps/darwin.go
+++ /dev/null
@@ -1,27 +0,0 @@
-//go:build darwin
-
-/*
- * GodoAI - A software focused on localizing AI applications
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package deps
-
-import (
- _ "embed"
-)
-
-//go:embed darwin.zip
-var embeddedZip []byte
diff --git a/godo/deps/darwin/goconv/pdf/pdfimages b/godo/deps/darwin/goconv/pdf/pdfimages
deleted file mode 100644
index fb21ce9..0000000
Binary files a/godo/deps/darwin/goconv/pdf/pdfimages and /dev/null differ
diff --git a/godo/deps/darwin/goconv/pdf/pdftohtml b/godo/deps/darwin/goconv/pdf/pdftohtml
deleted file mode 100644
index f75998d..0000000
Binary files a/godo/deps/darwin/goconv/pdf/pdftohtml and /dev/null differ
diff --git a/godo/deps/darwin/goconv/pdf/pdftopng b/godo/deps/darwin/goconv/pdf/pdftopng
deleted file mode 100644
index ef3acad..0000000
Binary files a/godo/deps/darwin/goconv/pdf/pdftopng and /dev/null differ
diff --git a/godo/deps/darwin/goconv/rapid/RapidOcrOnnx b/godo/deps/darwin/goconv/rapid/RapidOcrOnnx
deleted file mode 100644
index e78f04c..0000000
Binary files a/godo/deps/darwin/goconv/rapid/RapidOcrOnnx and /dev/null differ
diff --git a/godo/deps/darwin/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx b/godo/deps/darwin/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx
deleted file mode 100644
index be54729..0000000
Binary files a/godo/deps/darwin/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx and /dev/null differ
diff --git a/godo/deps/darwin/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx b/godo/deps/darwin/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx
deleted file mode 100644
index 9522c96..0000000
Binary files a/godo/deps/darwin/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx and /dev/null differ
diff --git a/godo/deps/darwin/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx b/godo/deps/darwin/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx
deleted file mode 100644
index 6b9d102..0000000
Binary files a/godo/deps/darwin/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx and /dev/null differ
diff --git a/godo/deps/darwin/goconv/rapid/models/ppocr_keys_v1.txt b/godo/deps/darwin/goconv/rapid/models/ppocr_keys_v1.txt
deleted file mode 100644
index 84b885d..0000000
--- a/godo/deps/darwin/goconv/rapid/models/ppocr_keys_v1.txt
+++ /dev/null
@@ -1,6623 +0,0 @@
-'
-疗
-绚
-诚
-娇
-溜
-题
-贿
-者
-廖
-更
-纳
-加
-奉
-公
-一
-就
-汴
-计
-与
-路
-房
-原
-妇
-2
-0
-8
--
-7
-其
->
-:
-]
-,
-,
-骑
-刈
-全
-消
-昏
-傈
-安
-久
-钟
-嗅
-不
-影
-处
-驽
-蜿
-资
-关
-椤
-地
-瘸
-专
-问
-忖
-票
-嫉
-炎
-韵
-要
-月
-田
-节
-陂
-鄙
-捌
-备
-拳
-伺
-眼
-网
-盎
-大
-傍
-心
-东
-愉
-汇
-蹿
-科
-每
-业
-里
-航
-晏
-字
-平
-录
-先
-1
-3
-彤
-鲶
-产
-稍
-督
-腴
-有
-象
-岳
-注
-绍
-在
-泺
-文
-定
-核
-名
-水
-过
-理
-让
-偷
-率
-等
-这
-发
-”
-为
-含
-肥
-酉
-相
-鄱
-七
-编
-猥
-锛
-日
-镀
-蒂
-掰
-倒
-辆
-栾
-栗
-综
-涩
-州
-雌
-滑
-馀
-了
-机
-块
-司
-宰
-甙
-兴
-矽
-抚
-保
-用
-沧
-秩
-如
-收
-息
-滥
-页
-疑
-埠
-!
-!
-姥
-异
-橹
-钇
-向
-下
-跄
-的
-椴
-沫
-国
-绥
-獠
-报
-开
-民
-蜇
-何
-分
-凇
-长
-讥
-藏
-掏
-施
-羽
-中
-讲
-派
-嘟
-人
-提
-浼
-间
-世
-而
-古
-多
-倪
-唇
-饯
-控
-庚
-首
-赛
-蜓
-味
-断
-制
-觉
-技
-替
-艰
-溢
-潮
-夕
-钺
-外
-摘
-枋
-动
-双
-单
-啮
-户
-枇
-确
-锦
-曜
-杜
-或
-能
-效
-霜
-盒
-然
-侗
-电
-晁
-放
-步
-鹃
-新
-杖
-蜂
-吒
-濂
-瞬
-评
-总
-隍
-对
-独
-合
-也
-是
-府
-青
-天
-诲
-墙
-组
-滴
-级
-邀
-帘
-示
-已
-时
-骸
-仄
-泅
-和
-遨
-店
-雇
-疫
-持
-巍
-踮
-境
-只
-亨
-目
-鉴
-崤
-闲
-体
-泄
-杂
-作
-般
-轰
-化
-解
-迂
-诿
-蛭
-璀
-腾
-告
-版
-服
-省
-师
-小
-规
-程
-线
-海
-办
-引
-二
-桧
-牌
-砺
-洄
-裴
-修
-图
-痫
-胡
-许
-犊
-事
-郛
-基
-柴
-呼
-食
-研
-奶
-律
-蛋
-因
-葆
-察
-戏
-褒
-戒
-再
-李
-骁
-工
-貂
-油
-鹅
-章
-啄
-休
-场
-给
-睡
-纷
-豆
-器
-捎
-说
-敏
-学
-会
-浒
-设
-诊
-格
-廓
-查
-来
-霓
-室
-溆
-¢
-诡
-寥
-焕
-舜
-柒
-狐
-回
-戟
-砾
-厄
-实
-翩
-尿
-五
-入
-径
-惭
-喹
-股
-宇
-篝
-|
-;
-美
-期
-云
-九
-祺
-扮
-靠
-锝
-槌
-系
-企
-酰
-阊
-暂
-蚕
-忻
-豁
-本
-羹
-执
-条
-钦
-H
-獒
-限
-进
-季
-楦
-于
-芘
-玖
-铋
-茯
-未
-答
-粘
-括
-样
-精
-欠
-矢
-甥
-帷
-嵩
-扣
-令
-仔
-风
-皈
-行
-支
-部
-蓉
-刮
-站
-蜡
-救
-钊
-汗
-松
-嫌
-成
-可
-.
-鹤
-院
-从
-交
-政
-怕
-活
-调
-球
-局
-验
-髌
-第
-韫
-谗
-串
-到
-圆
-年
-米
-/
-*
-友
-忿
-检
-区
-看
-自
-敢
-刃
-个
-兹
-弄
-流
-留
-同
-没
-齿
-星
-聆
-轼
-湖
-什
-三
-建
-蛔
-儿
-椋
-汕
-震
-颧
-鲤
-跟
-力
-情
-璺
-铨
-陪
-务
-指
-族
-训
-滦
-鄣
-濮
-扒
-商
-箱
-十
-召
-慷
-辗
-所
-莞
-管
-护
-臭
-横
-硒
-嗓
-接
-侦
-六
-露
-党
-馋
-驾
-剖
-高
-侬
-妪
-幂
-猗
-绺
-骐
-央
-酐
-孝
-筝
-课
-徇
-缰
-门
-男
-西
-项
-句
-谙
-瞒
-秃
-篇
-教
-碲
-罚
-声
-呐
-景
-前
-富
-嘴
-鳌
-稀
-免
-朋
-啬
-睐
-去
-赈
-鱼
-住
-肩
-愕
-速
-旁
-波
-厅
-健
-茼
-厥
-鲟
-谅
-投
-攸
-炔
-数
-方
-击
-呋
-谈
-绩
-别
-愫
-僚
-躬
-鹧
-胪
-炳
-招
-喇
-膨
-泵
-蹦
-毛
-结
-5
-4
-谱
-识
-陕
-粽
-婚
-拟
-构
-且
-搜
-任
-潘
-比
-郢
-妨
-醪
-陀
-桔
-碘
-扎
-选
-哈
-骷
-楷
-亿
-明
-缆
-脯
-监
-睫
-逻
-婵
-共
-赴
-淝
-凡
-惦
-及
-达
-揖
-谩
-澹
-减
-焰
-蛹
-番
-祁
-柏
-员
-禄
-怡
-峤
-龙
-白
-叽
-生
-闯
-起
-细
-装
-谕
-竟
-聚
-钙
-上
-导
-渊
-按
-艾
-辘
-挡
-耒
-盹
-饪
-臀
-记
-邮
-蕙
-受
-各
-医
-搂
-普
-滇
-朗
-茸
-带
-翻
-酚
-(
-光
-堤
-墟
-蔷
-万
-幻
-〓
-瑙
-辈
-昧
-盏
-亘
-蛀
-吉
-铰
-请
-子
-假
-闻
-税
-井
-诩
-哨
-嫂
-好
-面
-琐
-校
-馊
-鬣
-缂
-营
-访
-炖
-占
-农
-缀
-否
-经
-钚
-棵
-趟
-张
-亟
-吏
-茶
-谨
-捻
-论
-迸
-堂
-玉
-信
-吧
-瞠
-乡
-姬
-寺
-咬
-溏
-苄
-皿
-意
-赉
-宝
-尔
-钰
-艺
-特
-唳
-踉
-都
-荣
-倚
-登
-荐
-丧
-奇
-涵
-批
-炭
-近
-符
-傩
-感
-道
-着
-菊
-虹
-仲
-众
-懈
-濯
-颞
-眺
-南
-释
-北
-缝
-标
-既
-茗
-整
-撼
-迤
-贲
-挎
-耱
-拒
-某
-妍
-卫
-哇
-英
-矶
-藩
-治
-他
-元
-领
-膜
-遮
-穗
-蛾
-飞
-荒
-棺
-劫
-么
-市
-火
-温
-拈
-棚
-洼
-转
-果
-奕
-卸
-迪
-伸
-泳
-斗
-邡
-侄
-涨
-屯
-萋
-胭
-氡
-崮
-枞
-惧
-冒
-彩
-斜
-手
-豚
-随
-旭
-淑
-妞
-形
-菌
-吲
-沱
-争
-驯
-歹
-挟
-兆
-柱
-传
-至
-包
-内
-响
-临
-红
-功
-弩
-衡
-寂
-禁
-老
-棍
-耆
-渍
-织
-害
-氵
-渑
-布
-载
-靥
-嗬
-虽
-苹
-咨
-娄
-库
-雉
-榜
-帜
-嘲
-套
-瑚
-亲
-簸
-欧
-边
-6
-腿
-旮
-抛
-吹
-瞳
-得
-镓
-梗
-厨
-继
-漾
-愣
-憨
-士
-策
-窑
-抑
-躯
-襟
-脏
-参
-贸
-言
-干
-绸
-鳄
-穷
-藜
-音
-折
-详
-)
-举
-悍
-甸
-癌
-黎
-谴
-死
-罩
-迁
-寒
-驷
-袖
-媒
-蒋
-掘
-模
-纠
-恣
-观
-祖
-蛆
-碍
-位
-稿
-主
-澧
-跌
-筏
-京
-锏
-帝
-贴
-证
-糠
-才
-黄
-鲸
-略
-炯
-饱
-四
-出
-园
-犀
-牧
-容
-汉
-杆
-浈
-汰
-瑷
-造
-虫
-瘩
-怪
-驴
-济
-应
-花
-沣
-谔
-夙
-旅
-价
-矿
-以
-考
-s
-u
-呦
-晒
-巡
-茅
-准
-肟
-瓴
-詹
-仟
-褂
-译
-桌
-混
-宁
-怦
-郑
-抿
-些
-余
-鄂
-饴
-攒
-珑
-群
-阖
-岔
-琨
-藓
-预
-环
-洮
-岌
-宀
-杲
-瀵
-最
-常
-囡
-周
-踊
-女
-鼓
-袭
-喉
-简
-范
-薯
-遐
-疏
-粱
-黜
-禧
-法
-箔
-斤
-遥
-汝
-奥
-直
-贞
-撑
-置
-绱
-集
-她
-馅
-逗
-钧
-橱
-魉
-[
-恙
-躁
-唤
-9
-旺
-膘
-待
-脾
-惫
-购
-吗
-依
-盲
-度
-瘿
-蠖
-俾
-之
-镗
-拇
-鲵
-厝
-簧
-续
-款
-展
-啃
-表
-剔
-品
-钻
-腭
-损
-清
-锶
-统
-涌
-寸
-滨
-贪
-链
-吠
-冈
-伎
-迥
-咏
-吁
-览
-防
-迅
-失
-汾
-阔
-逵
-绀
-蔑
-列
-川
-凭
-努
-熨
-揪
-利
-俱
-绉
-抢
-鸨
-我
-即
-责
-膦
-易
-毓
-鹊
-刹
-玷
-岿
-空
-嘞
-绊
-排
-术
-估
-锷
-违
-们
-苟
-铜
-播
-肘
-件
-烫
-审
-鲂
-广
-像
-铌
-惰
-铟
-巳
-胍
-鲍
-康
-憧
-色
-恢
-想
-拷
-尤
-疳
-知
-S
-Y
-F
-D
-A
-峄
-裕
-帮
-握
-搔
-氐
-氘
-难
-墒
-沮
-雨
-叁
-缥
-悴
-藐
-湫
-娟
-苑
-稠
-颛
-簇
-后
-阕
-闭
-蕤
-缚
-怎
-佞
-码
-嘤
-蔡
-痊
-舱
-螯
-帕
-赫
-昵
-升
-烬
-岫
-、
-疵
-蜻
-髁
-蕨
-隶
-烛
-械
-丑
-盂
-梁
-强
-鲛
-由
-拘
-揉
-劭
-龟
-撤
-钩
-呕
-孛
-费
-妻
-漂
-求
-阑
-崖
-秤
-甘
-通
-深
-补
-赃
-坎
-床
-啪
-承
-吼
-量
-暇
-钼
-烨
-阂
-擎
-脱
-逮
-称
-P
-神
-属
-矗
-华
-届
-狍
-葑
-汹
-育
-患
-窒
-蛰
-佼
-静
-槎
-运
-鳗
-庆
-逝
-曼
-疱
-克
-代
-官
-此
-麸
-耧
-蚌
-晟
-例
-础
-榛
-副
-测
-唰
-缢
-迹
-灬
-霁
-身
-岁
-赭
-扛
-又
-菡
-乜
-雾
-板
-读
-陷
-徉
-贯
-郁
-虑
-变
-钓
-菜
-圾
-现
-琢
-式
-乐
-维
-渔
-浜
-左
-吾
-脑
-钡
-警
-T
-啵
-拴
-偌
-漱
-湿
-硕
-止
-骼
-魄
-积
-燥
-联
-踢
-玛
-则
-窿
-见
-振
-畿
-送
-班
-钽
-您
-赵
-刨
-印
-讨
-踝
-籍
-谡
-舌
-崧
-汽
-蔽
-沪
-酥
-绒
-怖
-财
-帖
-肱
-私
-莎
-勋
-羔
-霸
-励
-哼
-帐
-将
-帅
-渠
-纪
-婴
-娩
-岭
-厘
-滕
-吻
-伤
-坝
-冠
-戊
-隆
-瘁
-介
-涧
-物
-黍
-并
-姗
-奢
-蹑
-掣
-垸
-锴
-命
-箍
-捉
-病
-辖
-琰
-眭
-迩
-艘
-绌
-繁
-寅
-若
-毋
-思
-诉
-类
-诈
-燮
-轲
-酮
-狂
-重
-反
-职
-筱
-县
-委
-磕
-绣
-奖
-晋
-濉
-志
-徽
-肠
-呈
-獐
-坻
-口
-片
-碰
-几
-村
-柿
-劳
-料
-获
-亩
-惕
-晕
-厌
-号
-罢
-池
-正
-鏖
-煨
-家
-棕
-复
-尝
-懋
-蜥
-锅
-岛
-扰
-队
-坠
-瘾
-钬
-@
-卧
-疣
-镇
-譬
-冰
-彷
-频
-黯
-据
-垄
-采
-八
-缪
-瘫
-型
-熹
-砰
-楠
-襁
-箐
-但
-嘶
-绳
-啤
-拍
-盥
-穆
-傲
-洗
-盯
-塘
-怔
-筛
-丿
-台
-恒
-喂
-葛
-永
-¥
-烟
-酒
-桦
-书
-砂
-蚝
-缉
-态
-瀚
-袄
-圳
-轻
-蛛
-超
-榧
-遛
-姒
-奘
-铮
-右
-荽
-望
-偻
-卡
-丶
-氰
-附
-做
-革
-索
-戚
-坨
-桷
-唁
-垅
-榻
-岐
-偎
-坛
-莨
-山
-殊
-微
-骇
-陈
-爨
-推
-嗝
-驹
-澡
-藁
-呤
-卤
-嘻
-糅
-逛
-侵
-郓
-酌
-德
-摇
-※
-鬃
-被
-慨
-殡
-羸
-昌
-泡
-戛
-鞋
-河
-宪
-沿
-玲
-鲨
-翅
-哽
-源
-铅
-语
-照
-邯
-址
-荃
-佬
-顺
-鸳
-町
-霭
-睾
-瓢
-夸
-椁
-晓
-酿
-痈
-咔
-侏
-券
-噎
-湍
-签
-嚷
-离
-午
-尚
-社
-锤
-背
-孟
-使
-浪
-缦
-潍
-鞅
-军
-姹
-驶
-笑
-鳟
-鲁
-》
-孽
-钜
-绿
-洱
-礴
-焯
-椰
-颖
-囔
-乌
-孔
-巴
-互
-性
-椽
-哞
-聘
-昨
-早
-暮
-胶
-炀
-隧
-低
-彗
-昝
-铁
-呓
-氽
-藉
-喔
-癖
-瑗
-姨
-权
-胱
-韦
-堑
-蜜
-酋
-楝
-砝
-毁
-靓
-歙
-锲
-究
-屋
-喳
-骨
-辨
-碑
-武
-鸠
-宫
-辜
-烊
-适
-坡
-殃
-培
-佩
-供
-走
-蜈
-迟
-翼
-况
-姣
-凛
-浔
-吃
-飘
-债
-犟
-金
-促
-苛
-崇
-坂
-莳
-畔
-绂
-兵
-蠕
-斋
-根
-砍
-亢
-欢
-恬
-崔
-剁
-餐
-榫
-快
-扶
-‖
-濒
-缠
-鳜
-当
-彭
-驭
-浦
-篮
-昀
-锆
-秸
-钳
-弋
-娣
-瞑
-夷
-龛
-苫
-拱
-致
-%
-嵊
-障
-隐
-弑
-初
-娓
-抉
-汩
-累
-蓖
-"
-唬
-助
-苓
-昙
-押
-毙
-破
-城
-郧
-逢
-嚏
-獭
-瞻
-溱
-婿
-赊
-跨
-恼
-璧
-萃
-姻
-貉
-灵
-炉
-密
-氛
-陶
-砸
-谬
-衔
-点
-琛
-沛
-枳
-层
-岱
-诺
-脍
-榈
-埂
-征
-冷
-裁
-打
-蹴
-素
-瘘
-逞
-蛐
-聊
-激
-腱
-萘
-踵
-飒
-蓟
-吆
-取
-咙
-簋
-涓
-矩
-曝
-挺
-揣
-座
-你
-史
-舵
-焱
-尘
-苏
-笈
-脚
-溉
-榨
-诵
-樊
-邓
-焊
-义
-庶
-儋
-蟋
-蒲
-赦
-呷
-杞
-诠
-豪
-还
-试
-颓
-茉
-太
-除
-紫
-逃
-痴
-草
-充
-鳕
-珉
-祗
-墨
-渭
-烩
-蘸
-慕
-璇
-镶
-穴
-嵘
-恶
-骂
-险
-绋
-幕
-碉
-肺
-戳
-刘
-潞
-秣
-纾
-潜
-銮
-洛
-须
-罘
-销
-瘪
-汞
-兮
-屉
-r
-林
-厕
-质
-探
-划
-狸
-殚
-善
-煊
-烹
-〒
-锈
-逯
-宸
-辍
-泱
-柚
-袍
-远
-蹋
-嶙
-绝
-峥
-娥
-缍
-雀
-徵
-认
-镱
-谷
-=
-贩
-勉
-撩
-鄯
-斐
-洋
-非
-祚
-泾
-诒
-饿
-撬
-威
-晷
-搭
-芍
-锥
-笺
-蓦
-候
-琊
-档
-礁
-沼
-卵
-荠
-忑
-朝
-凹
-瑞
-头
-仪
-弧
-孵
-畏
-铆
-突
-衲
-车
-浩
-气
-茂
-悖
-厢
-枕
-酝
-戴
-湾
-邹
-飚
-攘
-锂
-写
-宵
-翁
-岷
-无
-喜
-丈
-挑
-嗟
-绛
-殉
-议
-槽
-具
-醇
-淞
-笃
-郴
-阅
-饼
-底
-壕
-砚
-弈
-询
-缕
-庹
-翟
-零
-筷
-暨
-舟
-闺
-甯
-撞
-麂
-茌
-蔼
-很
-珲
-捕
-棠
-角
-阉
-媛
-娲
-诽
-剿
-尉
-爵
-睬
-韩
-诰
-匣
-危
-糍
-镯
-立
-浏
-阳
-少
-盆
-舔
-擘
-匪
-申
-尬
-铣
-旯
-抖
-赘
-瓯
-居
-ˇ
-哮
-游
-锭
-茏
-歌
-坏
-甚
-秒
-舞
-沙
-仗
-劲
-潺
-阿
-燧
-郭
-嗖
-霏
-忠
-材
-奂
-耐
-跺
-砀
-输
-岖
-媳
-氟
-极
-摆
-灿
-今
-扔
-腻
-枝
-奎
-药
-熄
-吨
-话
-q
-额
-慑
-嘌
-协
-喀
-壳
-埭
-视
-著
-於
-愧
-陲
-翌
-峁
-颅
-佛
-腹
-聋
-侯
-咎
-叟
-秀
-颇
-存
-较
-罪
-哄
-岗
-扫
-栏
-钾
-羌
-己
-璨
-枭
-霉
-煌
-涸
-衿
-键
-镝
-益
-岢
-奏
-连
-夯
-睿
-冥
-均
-糖
-狞
-蹊
-稻
-爸
-刿
-胥
-煜
-丽
-肿
-璃
-掸
-跚
-灾
-垂
-樾
-濑
-乎
-莲
-窄
-犹
-撮
-战
-馄
-软
-络
-显
-鸢
-胸
-宾
-妲
-恕
-埔
-蝌
-份
-遇
-巧
-瞟
-粒
-恰
-剥
-桡
-博
-讯
-凯
-堇
-阶
-滤
-卖
-斌
-骚
-彬
-兑
-磺
-樱
-舷
-两
-娱
-福
-仃
-差
-找
-桁
-÷
-净
-把
-阴
-污
-戬
-雷
-碓
-蕲
-楚
-罡
-焖
-抽
-妫
-咒
-仑
-闱
-尽
-邑
-菁
-爱
-贷
-沥
-鞑
-牡
-嗉
-崴
-骤
-塌
-嗦
-订
-拮
-滓
-捡
-锻
-次
-坪
-杩
-臃
-箬
-融
-珂
-鹗
-宗
-枚
-降
-鸬
-妯
-阄
-堰
-盐
-毅
-必
-杨
-崃
-俺
-甬
-状
-莘
-货
-耸
-菱
-腼
-铸
-唏
-痤
-孚
-澳
-懒
-溅
-翘
-疙
-杷
-淼
-缙
-骰
-喊
-悉
-砻
-坷
-艇
-赁
-界
-谤
-纣
-宴
-晃
-茹
-归
-饭
-梢
-铡
-街
-抄
-肼
-鬟
-苯
-颂
-撷
-戈
-炒
-咆
-茭
-瘙
-负
-仰
-客
-琉
-铢
-封
-卑
-珥
-椿
-镧
-窨
-鬲
-寿
-御
-袤
-铃
-萎
-砖
-餮
-脒
-裳
-肪
-孕
-嫣
-馗
-嵇
-恳
-氯
-江
-石
-褶
-冢
-祸
-阻
-狈
-羞
-银
-靳
-透
-咳
-叼
-敷
-芷
-啥
-它
-瓤
-兰
-痘
-懊
-逑
-肌
-往
-捺
-坊
-甩
-呻
-〃
-沦
-忘
-膻
-祟
-菅
-剧
-崆
-智
-坯
-臧
-霍
-墅
-攻
-眯
-倘
-拢
-骠
-铐
-庭
-岙
-瓠
-′
-缺
-泥
-迢
-捶
-?
-?
-郏
-喙
-掷
-沌
-纯
-秘
-种
-听
-绘
-固
-螨
-团
-香
-盗
-妒
-埚
-蓝
-拖
-旱
-荞
-铀
-血
-遏
-汲
-辰
-叩
-拽
-幅
-硬
-惶
-桀
-漠
-措
-泼
-唑
-齐
-肾
-念
-酱
-虚
-屁
-耶
-旗
-砦
-闵
-婉
-馆
-拭
-绅
-韧
-忏
-窝
-醋
-葺
-顾
-辞
-倜
-堆
-辋
-逆
-玟
-贱
-疾
-董
-惘
-倌
-锕
-淘
-嘀
-莽
-俭
-笏
-绑
-鲷
-杈
-择
-蟀
-粥
-嗯
-驰
-逾
-案
-谪
-褓
-胫
-哩
-昕
-颚
-鲢
-绠
-躺
-鹄
-崂
-儒
-俨
-丝
-尕
-泌
-啊
-萸
-彰
-幺
-吟
-骄
-苣
-弦
-脊
-瑰
-〈
-诛
-镁
-析
-闪
-剪
-侧
-哟
-框
-螃
-守
-嬗
-燕
-狭
-铈
-缮
-概
-迳
-痧
-鲲
-俯
-售
-笼
-痣
-扉
-挖
-满
-咋
-援
-邱
-扇
-歪
-便
-玑
-绦
-峡
-蛇
-叨
-〖
-泽
-胃
-斓
-喋
-怂
-坟
-猪
-该
-蚬
-炕
-弥
-赞
-棣
-晔
-娠
-挲
-狡
-创
-疖
-铕
-镭
-稷
-挫
-弭
-啾
-翔
-粉
-履
-苘
-哦
-楼
-秕
-铂
-土
-锣
-瘟
-挣
-栉
-习
-享
-桢
-袅
-磨
-桂
-谦
-延
-坚
-蔚
-噗
-署
-谟
-猬
-钎
-恐
-嬉
-雒
-倦
-衅
-亏
-璩
-睹
-刻
-殿
-王
-算
-雕
-麻
-丘
-柯
-骆
-丸
-塍
-谚
-添
-鲈
-垓
-桎
-蚯
-芥
-予
-飕
-镦
-谌
-窗
-醚
-菀
-亮
-搪
-莺
-蒿
-羁
-足
-J
-真
-轶
-悬
-衷
-靛
-翊
-掩
-哒
-炅
-掐
-冼
-妮
-l
-谐
-稚
-荆
-擒
-犯
-陵
-虏
-浓
-崽
-刍
-陌
-傻
-孜
-千
-靖
-演
-矜
-钕
-煽
-杰
-酗
-渗
-伞
-栋
-俗
-泫
-戍
-罕
-沾
-疽
-灏
-煦
-芬
-磴
-叱
-阱
-榉
-湃
-蜀
-叉
-醒
-彪
-租
-郡
-篷
-屎
-良
-垢
-隗
-弱
-陨
-峪
-砷
-掴
-颁
-胎
-雯
-绵
-贬
-沐
-撵
-隘
-篙
-暖
-曹
-陡
-栓
-填
-臼
-彦
-瓶
-琪
-潼
-哪
-鸡
-摩
-啦
-俟
-锋
-域
-耻
-蔫
-疯
-纹
-撇
-毒
-绶
-痛
-酯
-忍
-爪
-赳
-歆
-嘹
-辕
-烈
-册
-朴
-钱
-吮
-毯
-癜
-娃
-谀
-邵
-厮
-炽
-璞
-邃
-丐
-追
-词
-瓒
-忆
-轧
-芫
-谯
-喷
-弟
-半
-冕
-裙
-掖
-墉
-绮
-寝
-苔
-势
-顷
-褥
-切
-衮
-君
-佳
-嫒
-蚩
-霞
-佚
-洙
-逊
-镖
-暹
-唛
-&
-殒
-顶
-碗
-獗
-轭
-铺
-蛊
-废
-恹
-汨
-崩
-珍
-那
-杵
-曲
-纺
-夏
-薰
-傀
-闳
-淬
-姘
-舀
-拧
-卷
-楂
-恍
-讪
-厩
-寮
-篪
-赓
-乘
-灭
-盅
-鞣
-沟
-慎
-挂
-饺
-鼾
-杳
-树
-缨
-丛
-絮
-娌
-臻
-嗳
-篡
-侩
-述
-衰
-矛
-圈
-蚜
-匕
-筹
-匿
-濞
-晨
-叶
-骋
-郝
-挚
-蚴
-滞
-增
-侍
-描
-瓣
-吖
-嫦
-蟒
-匾
-圣
-赌
-毡
-癞
-恺
-百
-曳
-需
-篓
-肮
-庖
-帏
-卿
-驿
-遗
-蹬
-鬓
-骡
-歉
-芎
-胳
-屐
-禽
-烦
-晌
-寄
-媾
-狄
-翡
-苒
-船
-廉
-终
-痞
-殇
-々
-畦
-饶
-改
-拆
-悻
-萄
-£
-瓿
-乃
-訾
-桅
-匮
-溧
-拥
-纱
-铍
-骗
-蕃
-龋
-缬
-父
-佐
-疚
-栎
-醍
-掳
-蓄
-x
-惆
-颜
-鲆
-榆
-〔
-猎
-敌
-暴
-谥
-鲫
-贾
-罗
-玻
-缄
-扦
-芪
-癣
-落
-徒
-臾
-恿
-猩
-托
-邴
-肄
-牵
-春
-陛
-耀
-刊
-拓
-蓓
-邳
-堕
-寇
-枉
-淌
-啡
-湄
-兽
-酷
-萼
-碚
-濠
-萤
-夹
-旬
-戮
-梭
-琥
-椭
-昔
-勺
-蜊
-绐
-晚
-孺
-僵
-宣
-摄
-冽
-旨
-萌
-忙
-蚤
-眉
-噼
-蟑
-付
-契
-瓜
-悼
-颡
-壁
-曾
-窕
-颢
-澎
-仿
-俑
-浑
-嵌
-浣
-乍
-碌
-褪
-乱
-蔟
-隙
-玩
-剐
-葫
-箫
-纲
-围
-伐
-决
-伙
-漩
-瑟
-刑
-肓
-镳
-缓
-蹭
-氨
-皓
-典
-畲
-坍
-铑
-檐
-塑
-洞
-倬
-储
-胴
-淳
-戾
-吐
-灼
-惺
-妙
-毕
-珐
-缈
-虱
-盖
-羰
-鸿
-磅
-谓
-髅
-娴
-苴
-唷
-蚣
-霹
-抨
-贤
-唠
-犬
-誓
-逍
-庠
-逼
-麓
-籼
-釉
-呜
-碧
-秧
-氩
-摔
-霄
-穸
-纨
-辟
-妈
-映
-完
-牛
-缴
-嗷
-炊
-恩
-荔
-茆
-掉
-紊
-慌
-莓
-羟
-阙
-萁
-磐
-另
-蕹
-辱
-鳐
-湮
-吡
-吩
-唐
-睦
-垠
-舒
-圜
-冗
-瞿
-溺
-芾
-囱
-匠
-僳
-汐
-菩
-饬
-漓
-黑
-霰
-浸
-濡
-窥
-毂
-蒡
-兢
-驻
-鹉
-芮
-诙
-迫
-雳
-厂
-忐
-臆
-猴
-鸣
-蚪
-栈
-箕
-羡
-渐
-莆
-捍
-眈
-哓
-趴
-蹼
-埕
-嚣
-骛
-宏
-淄
-斑
-噜
-严
-瑛
-垃
-椎
-诱
-压
-庾
-绞
-焘
-廿
-抡
-迄
-棘
-夫
-纬
-锹
-眨
-瞌
-侠
-脐
-竞
-瀑
-孳
-骧
-遁
-姜
-颦
-荪
-滚
-萦
-伪
-逸
-粳
-爬
-锁
-矣
-役
-趣
-洒
-颔
-诏
-逐
-奸
-甭
-惠
-攀
-蹄
-泛
-尼
-拼
-阮
-鹰
-亚
-颈
-惑
-勒
-〉
-际
-肛
-爷
-刚
-钨
-丰
-养
-冶
-鲽
-辉
-蔻
-画
-覆
-皴
-妊
-麦
-返
-醉
-皂
-擀
-〗
-酶
-凑
-粹
-悟
-诀
-硖
-港
-卜
-z
-杀
-涕
-±
-舍
-铠
-抵
-弛
-段
-敝
-镐
-奠
-拂
-轴
-跛
-袱
-e
-t
-沉
-菇
-俎
-薪
-峦
-秭
-蟹
-历
-盟
-菠
-寡
-液
-肢
-喻
-染
-裱
-悱
-抱
-氙
-赤
-捅
-猛
-跑
-氮
-谣
-仁
-尺
-辊
-窍
-烙
-衍
-架
-擦
-倏
-璐
-瑁
-币
-楞
-胖
-夔
-趸
-邛
-惴
-饕
-虔
-蝎
-§
-哉
-贝
-宽
-辫
-炮
-扩
-饲
-籽
-魏
-菟
-锰
-伍
-猝
-末
-琳
-哚
-蛎
-邂
-呀
-姿
-鄞
-却
-歧
-仙
-恸
-椐
-森
-牒
-寤
-袒
-婆
-虢
-雅
-钉
-朵
-贼
-欲
-苞
-寰
-故
-龚
-坭
-嘘
-咫
-礼
-硷
-兀
-睢
-汶
-’
-铲
-烧
-绕
-诃
-浃
-钿
-哺
-柜
-讼
-颊
-璁
-腔
-洽
-咐
-脲
-簌
-筠
-镣
-玮
-鞠
-谁
-兼
-姆
-挥
-梯
-蝴
-谘
-漕
-刷
-躏
-宦
-弼
-b
-垌
-劈
-麟
-莉
-揭
-笙
-渎
-仕
-嗤
-仓
-配
-怏
-抬
-错
-泯
-镊
-孰
-猿
-邪
-仍
-秋
-鼬
-壹
-歇
-吵
-炼
-<
-尧
-射
-柬
-廷
-胧
-霾
-凳
-隋
-肚
-浮
-梦
-祥
-株
-堵
-退
-L
-鹫
-跎
-凶
-毽
-荟
-炫
-栩
-玳
-甜
-沂
-鹿
-顽
-伯
-爹
-赔
-蛴
-徐
-匡
-欣
-狰
-缸
-雹
-蟆
-疤
-默
-沤
-啜
-痂
-衣
-禅
-w
-i
-h
-辽
-葳
-黝
-钗
-停
-沽
-棒
-馨
-颌
-肉
-吴
-硫
-悯
-劾
-娈
-马
-啧
-吊
-悌
-镑
-峭
-帆
-瀣
-涉
-咸
-疸
-滋
-泣
-翦
-拙
-癸
-钥
-蜒
-+
-尾
-庄
-凝
-泉
-婢
-渴
-谊
-乞
-陆
-锉
-糊
-鸦
-淮
-I
-B
-N
-晦
-弗
-乔
-庥
-葡
-尻
-席
-橡
-傣
-渣
-拿
-惩
-麋
-斛
-缃
-矮
-蛏
-岘
-鸽
-姐
-膏
-催
-奔
-镒
-喱
-蠡
-摧
-钯
-胤
-柠
-拐
-璋
-鸥
-卢
-荡
-倾
-^
-_
-珀
-逄
-萧
-塾
-掇
-贮
-笆
-聂
-圃
-冲
-嵬
-M
-滔
-笕
-值
-炙
-偶
-蜱
-搐
-梆
-汪
-蔬
-腑
-鸯
-蹇
-敞
-绯
-仨
-祯
-谆
-梧
-糗
-鑫
-啸
-豺
-囹
-猾
-巢
-柄
-瀛
-筑
-踌
-沭
-暗
-苁
-鱿
-蹉
-脂
-蘖
-牢
-热
-木
-吸
-溃
-宠
-序
-泞
-偿
-拜
-檩
-厚
-朐
-毗
-螳
-吞
-媚
-朽
-担
-蝗
-橘
-畴
-祈
-糟
-盱
-隼
-郜
-惜
-珠
-裨
-铵
-焙
-琚
-唯
-咚
-噪
-骊
-丫
-滢
-勤
-棉
-呸
-咣
-淀
-隔
-蕾
-窈
-饨
-挨
-煅
-短
-匙
-粕
-镜
-赣
-撕
-墩
-酬
-馁
-豌
-颐
-抗
-酣
-氓
-佑
-搁
-哭
-递
-耷
-涡
-桃
-贻
-碣
-截
-瘦
-昭
-镌
-蔓
-氚
-甲
-猕
-蕴
-蓬
-散
-拾
-纛
-狼
-猷
-铎
-埋
-旖
-矾
-讳
-囊
-糜
-迈
-粟
-蚂
-紧
-鲳
-瘢
-栽
-稼
-羊
-锄
-斟
-睁
-桥
-瓮
-蹙
-祉
-醺
-鼻
-昱
-剃
-跳
-篱
-跷
-蒜
-翎
-宅
-晖
-嗑
-壑
-峻
-癫
-屏
-狠
-陋
-袜
-途
-憎
-祀
-莹
-滟
-佶
-溥
-臣
-约
-盛
-峰
-磁
-慵
-婪
-拦
-莅
-朕
-鹦
-粲
-裤
-哎
-疡
-嫖
-琵
-窟
-堪
-谛
-嘉
-儡
-鳝
-斩
-郾
-驸
-酊
-妄
-胜
-贺
-徙
-傅
-噌
-钢
-栅
-庇
-恋
-匝
-巯
-邈
-尸
-锚
-粗
-佟
-蛟
-薹
-纵
-蚊
-郅
-绢
-锐
-苗
-俞
-篆
-淆
-膀
-鲜
-煎
-诶
-秽
-寻
-涮
-刺
-怀
-噶
-巨
-褰
-魅
-灶
-灌
-桉
-藕
-谜
-舸
-薄
-搀
-恽
-借
-牯
-痉
-渥
-愿
-亓
-耘
-杠
-柩
-锔
-蚶
-钣
-珈
-喘
-蹒
-幽
-赐
-稗
-晤
-莱
-泔
-扯
-肯
-菪
-裆
-腩
-豉
-疆
-骜
-腐
-倭
-珏
-唔
-粮
-亡
-润
-慰
-伽
-橄
-玄
-誉
-醐
-胆
-龊
-粼
-塬
-陇
-彼
-削
-嗣
-绾
-芽
-妗
-垭
-瘴
-爽
-薏
-寨
-龈
-泠
-弹
-赢
-漪
-猫
-嘧
-涂
-恤
-圭
-茧
-烽
-屑
-痕
-巾
-赖
-荸
-凰
-腮
-畈
-亵
-蹲
-偃
-苇
-澜
-艮
-换
-骺
-烘
-苕
-梓
-颉
-肇
-哗
-悄
-氤
-涠
-葬
-屠
-鹭
-植
-竺
-佯
-诣
-鲇
-瘀
-鲅
-邦
-移
-滁
-冯
-耕
-癔
-戌
-茬
-沁
-巩
-悠
-湘
-洪
-痹
-锟
-循
-谋
-腕
-鳃
-钠
-捞
-焉
-迎
-碱
-伫
-急
-榷
-奈
-邝
-卯
-辄
-皲
-卟
-醛
-畹
-忧
-稳
-雄
-昼
-缩
-阈
-睑
-扌
-耗
-曦
-涅
-捏
-瞧
-邕
-淖
-漉
-铝
-耦
-禹
-湛
-喽
-莼
-琅
-诸
-苎
-纂
-硅
-始
-嗨
-傥
-燃
-臂
-赅
-嘈
-呆
-贵
-屹
-壮
-肋
-亍
-蚀
-卅
-豹
-腆
-邬
-迭
-浊
-}
-童
-螂
-捐
-圩
-勐
-触
-寞
-汊
-壤
-荫
-膺
-渌
-芳
-懿
-遴
-螈
-泰
-蓼
-蛤
-茜
-舅
-枫
-朔
-膝
-眙
-避
-梅
-判
-鹜
-璜
-牍
-缅
-垫
-藻
-黔
-侥
-惚
-懂
-踩
-腰
-腈
-札
-丞
-唾
-慈
-顿
-摹
-荻
-琬
-~
-斧
-沈
-滂
-胁
-胀
-幄
-莜
-Z
-匀
-鄄
-掌
-绰
-茎
-焚
-赋
-萱
-谑
-汁
-铒
-瞎
-夺
-蜗
-野
-娆
-冀
-弯
-篁
-懵
-灞
-隽
-芡
-脘
-俐
-辩
-芯
-掺
-喏
-膈
-蝈
-觐
-悚
-踹
-蔗
-熠
-鼠
-呵
-抓
-橼
-峨
-畜
-缔
-禾
-崭
-弃
-熊
-摒
-凸
-拗
-穹
-蒙
-抒
-祛
-劝
-闫
-扳
-阵
-醌
-踪
-喵
-侣
-搬
-仅
-荧
-赎
-蝾
-琦
-买
-婧
-瞄
-寓
-皎
-冻
-赝
-箩
-莫
-瞰
-郊
-笫
-姝
-筒
-枪
-遣
-煸
-袋
-舆
-痱
-涛
-母
-〇
-启
-践
-耙
-绲
-盘
-遂
-昊
-搞
-槿
-诬
-纰
-泓
-惨
-檬
-亻
-越
-C
-o
-憩
-熵
-祷
-钒
-暧
-塔
-阗
-胰
-咄
-娶
-魔
-琶
-钞
-邻
-扬
-杉
-殴
-咽
-弓
-〆
-髻
-】
-吭
-揽
-霆
-拄
-殖
-脆
-彻
-岩
-芝
-勃
-辣
-剌
-钝
-嘎
-甄
-佘
-皖
-伦
-授
-徕
-憔
-挪
-皇
-庞
-稔
-芜
-踏
-溴
-兖
-卒
-擢
-饥
-鳞
-煲
-‰
-账
-颗
-叻
-斯
-捧
-鳍
-琮
-讹
-蛙
-纽
-谭
-酸
-兔
-莒
-睇
-伟
-觑
-羲
-嗜
-宜
-褐
-旎
-辛
-卦
-诘
-筋
-鎏
-溪
-挛
-熔
-阜
-晰
-鳅
-丢
-奚
-灸
-呱
-献
-陉
-黛
-鸪
-甾
-萨
-疮
-拯
-洲
-疹
-辑
-叙
-恻
-谒
-允
-柔
-烂
-氏
-逅
-漆
-拎
-惋
-扈
-湟
-纭
-啕
-掬
-擞
-哥
-忽
-涤
-鸵
-靡
-郗
-瓷
-扁
-廊
-怨
-雏
-钮
-敦
-E
-懦
-憋
-汀
-拚
-啉
-腌
-岸
-f
-痼
-瞅
-尊
-咀
-眩
-飙
-忌
-仝
-迦
-熬
-毫
-胯
-篑
-茄
-腺
-凄
-舛
-碴
-锵
-诧
-羯
-後
-漏
-汤
-宓
-仞
-蚁
-壶
-谰
-皑
-铄
-棰
-罔
-辅
-晶
-苦
-牟
-闽
-\
-烃
-饮
-聿
-丙
-蛳
-朱
-煤
-涔
-鳖
-犁
-罐
-荼
-砒
-淦
-妤
-黏
-戎
-孑
-婕
-瑾
-戢
-钵
-枣
-捋
-砥
-衩
-狙
-桠
-稣
-阎
-肃
-梏
-诫
-孪
-昶
-婊
-衫
-嗔
-侃
-塞
-蜃
-樵
-峒
-貌
-屿
-欺
-缫
-阐
-栖
-诟
-珞
-荭
-吝
-萍
-嗽
-恂
-啻
-蜴
-磬
-峋
-俸
-豫
-谎
-徊
-镍
-韬
-魇
-晴
-U
-囟
-猜
-蛮
-坐
-囿
-伴
-亭
-肝
-佗
-蝠
-妃
-胞
-滩
-榴
-氖
-垩
-苋
-砣
-扪
-馏
-姓
-轩
-厉
-夥
-侈
-禀
-垒
-岑
-赏
-钛
-辐
-痔
-披
-纸
-碳
-“
-坞
-蠓
-挤
-荥
-沅
-悔
-铧
-帼
-蒌
-蝇
-a
-p
-y
-n
-g
-哀
-浆
-瑶
-凿
-桶
-馈
-皮
-奴
-苜
-佤
-伶
-晗
-铱
-炬
-优
-弊
-氢
-恃
-甫
-攥
-端
-锌
-灰
-稹
-炝
-曙
-邋
-亥
-眶
-碾
-拉
-萝
-绔
-捷
-浍
-腋
-姑
-菖
-凌
-涞
-麽
-锢
-桨
-潢
-绎
-镰
-殆
-锑
-渝
-铬
-困
-绽
-觎
-匈
-糙
-暑
-裹
-鸟
-盔
-肽
-迷
-綦
-『
-亳
-佝
-俘
-钴
-觇
-骥
-仆
-疝
-跪
-婶
-郯
-瀹
-唉
-脖
-踞
-针
-晾
-忒
-扼
-瞩
-叛
-椒
-疟
-嗡
-邗
-肆
-跆
-玫
-忡
-捣
-咧
-唆
-艄
-蘑
-潦
-笛
-阚
-沸
-泻
-掊
-菽
-贫
-斥
-髂
-孢
-镂
-赂
-麝
-鸾
-屡
-衬
-苷
-恪
-叠
-希
-粤
-爻
-喝
-茫
-惬
-郸
-绻
-庸
-撅
-碟
-宄
-妹
-膛
-叮
-饵
-崛
-嗲
-椅
-冤
-搅
-咕
-敛
-尹
-垦
-闷
-蝉
-霎
-勰
-败
-蓑
-泸
-肤
-鹌
-幌
-焦
-浠
-鞍
-刁
-舰
-乙
-竿
-裔
-。
-茵
-函
-伊
-兄
-丨
-娜
-匍
-謇
-莪
-宥
-似
-蝽
-翳
-酪
-翠
-粑
-薇
-祢
-骏
-赠
-叫
-Q
-噤
-噻
-竖
-芗
-莠
-潭
-俊
-羿
-耜
-O
-郫
-趁
-嗪
-囚
-蹶
-芒
-洁
-笋
-鹑
-敲
-硝
-啶
-堡
-渲
-揩
-』
-携
-宿
-遒
-颍
-扭
-棱
-割
-萜
-蔸
-葵
-琴
-捂
-饰
-衙
-耿
-掠
-募
-岂
-窖
-涟
-蔺
-瘤
-柞
-瞪
-怜
-匹
-距
-楔
-炜
-哆
-秦
-缎
-幼
-茁
-绪
-痨
-恨
-楸
-娅
-瓦
-桩
-雪
-嬴
-伏
-榔
-妥
-铿
-拌
-眠
-雍
-缇
-‘
-卓
-搓
-哌
-觞
-噩
-屈
-哧
-髓
-咦
-巅
-娑
-侑
-淫
-膳
-祝
-勾
-姊
-莴
-胄
-疃
-薛
-蜷
-胛
-巷
-芙
-芋
-熙
-闰
-勿
-窃
-狱
-剩
-钏
-幢
-陟
-铛
-慧
-靴
-耍
-k
-浙
-浇
-飨
-惟
-绗
-祜
-澈
-啼
-咪
-磷
-摞
-诅
-郦
-抹
-跃
-壬
-吕
-肖
-琏
-颤
-尴
-剡
-抠
-凋
-赚
-泊
-津
-宕
-殷
-倔
-氲
-漫
-邺
-涎
-怠
-$
-垮
-荬
-遵
-俏
-叹
-噢
-饽
-蜘
-孙
-筵
-疼
-鞭
-羧
-牦
-箭
-潴
-c
-眸
-祭
-髯
-啖
-坳
-愁
-芩
-驮
-倡
-巽
-穰
-沃
-胚
-怒
-凤
-槛
-剂
-趵
-嫁
-v
-邢
-灯
-鄢
-桐
-睽
-檗
-锯
-槟
-婷
-嵋
-圻
-诗
-蕈
-颠
-遭
-痢
-芸
-怯
-馥
-竭
-锗
-徜
-恭
-遍
-籁
-剑
-嘱
-苡
-龄
-僧
-桑
-潸
-弘
-澶
-楹
-悲
-讫
-愤
-腥
-悸
-谍
-椹
-呢
-桓
-葭
-攫
-阀
-翰
-躲
-敖
-柑
-郎
-笨
-橇
-呃
-魁
-燎
-脓
-葩
-磋
-垛
-玺
-狮
-沓
-砜
-蕊
-锺
-罹
-蕉
-翱
-虐
-闾
-巫
-旦
-茱
-嬷
-枯
-鹏
-贡
-芹
-汛
-矫
-绁
-拣
-禺
-佃
-讣
-舫
-惯
-乳
-趋
-疲
-挽
-岚
-虾
-衾
-蠹
-蹂
-飓
-氦
-铖
-孩
-稞
-瑜
-壅
-掀
-勘
-妓
-畅
-髋
-W
-庐
-牲
-蓿
-榕
-练
-垣
-唱
-邸
-菲
-昆
-婺
-穿
-绡
-麒
-蚱
-掂
-愚
-泷
-涪
-漳
-妩
-娉
-榄
-讷
-觅
-旧
-藤
-煮
-呛
-柳
-腓
-叭
-庵
-烷
-阡
-罂
-蜕
-擂
-猖
-咿
-媲
-脉
-【
-沏
-貅
-黠
-熏
-哲
-烁
-坦
-酵
-兜
-×
-潇
-撒
-剽
-珩
-圹
-乾
-摸
-樟
-帽
-嗒
-襄
-魂
-轿
-憬
-锡
-〕
-喃
-皆
-咖
-隅
-脸
-残
-泮
-袂
-鹂
-珊
-囤
-捆
-咤
-误
-徨
-闹
-淙
-芊
-淋
-怆
-囗
-拨
-梳
-渤
-R
-G
-绨
-蚓
-婀
-幡
-狩
-麾
-谢
-唢
-裸
-旌
-伉
-纶
-裂
-驳
-砼
-咛
-澄
-樨
-蹈
-宙
-澍
-倍
-貔
-操
-勇
-蟠
-摈
-砧
-虬
-够
-缁
-悦
-藿
-撸
-艹
-摁
-淹
-豇
-虎
-榭
-ˉ
-吱
-d
-°
-喧
-荀
-踱
-侮
-奋
-偕
-饷
-犍
-惮
-坑
-璎
-徘
-宛
-妆
-袈
-倩
-窦
-昂
-荏
-乖
-K
-怅
-撰
-鳙
-牙
-袁
-酞
-X
-痿
-琼
-闸
-雁
-趾
-荚
-虻
-涝
-《
-杏
-韭
-偈
-烤
-绫
-鞘
-卉
-症
-遢
-蓥
-诋
-杭
-荨
-匆
-竣
-簪
-辙
-敕
-虞
-丹
-缭
-咩
-黟
-m
-淤
-瑕
-咂
-铉
-硼
-茨
-嶂
-痒
-畸
-敬
-涿
-粪
-窘
-熟
-叔
-嫔
-盾
-忱
-裘
-憾
-梵
-赡
-珙
-咯
-娘
-庙
-溯
-胺
-葱
-痪
-摊
-荷
-卞
-乒
-髦
-寐
-铭
-坩
-胗
-枷
-爆
-溟
-嚼
-羚
-砬
-轨
-惊
-挠
-罄
-竽
-菏
-氧
-浅
-楣
-盼
-枢
-炸
-阆
-杯
-谏
-噬
-淇
-渺
-俪
-秆
-墓
-泪
-跻
-砌
-痰
-垡
-渡
-耽
-釜
-讶
-鳎
-煞
-呗
-韶
-舶
-绷
-鹳
-缜
-旷
-铊
-皱
-龌
-檀
-霖
-奄
-槐
-艳
-蝶
-旋
-哝
-赶
-骞
-蚧
-腊
-盈
-丁
-`
-蜚
-矸
-蝙
-睨
-嚓
-僻
-鬼
-醴
-夜
-彝
-磊
-笔
-拔
-栀
-糕
-厦
-邰
-纫
-逭
-纤
-眦
-膊
-馍
-躇
-烯
-蘼
-冬
-诤
-暄
-骶
-哑
-瘠
-」
-臊
-丕
-愈
-咱
-螺
-擅
-跋
-搏
-硪
-谄
-笠
-淡
-嘿
-骅
-谧
-鼎
-皋
-姚
-歼
-蠢
-驼
-耳
-胬
-挝
-涯
-狗
-蒽
-孓
-犷
-凉
-芦
-箴
-铤
-孤
-嘛
-坤
-V
-茴
-朦
-挞
-尖
-橙
-诞
-搴
-碇
-洵
-浚
-帚
-蜍
-漯
-柘
-嚎
-讽
-芭
-荤
-咻
-祠
-秉
-跖
-埃
-吓
-糯
-眷
-馒
-惹
-娼
-鲑
-嫩
-讴
-轮
-瞥
-靶
-褚
-乏
-缤
-宋
-帧
-删
-驱
-碎
-扑
-俩
-俄
-偏
-涣
-竹
-噱
-皙
-佰
-渚
-唧
-斡
-#
-镉
-刀
-崎
-筐
-佣
-夭
-贰
-肴
-峙
-哔
-艿
-匐
-牺
-镛
-缘
-仡
-嫡
-劣
-枸
-堀
-梨
-簿
-鸭
-蒸
-亦
-稽
-浴
-{
-衢
-束
-槲
-j
-阁
-揍
-疥
-棋
-潋
-聪
-窜
-乓
-睛
-插
-冉
-阪
-苍
-搽
-「
-蟾
-螟
-幸
-仇
-樽
-撂
-慢
-跤
-幔
-俚
-淅
-覃
-觊
-溶
-妖
-帛
-侨
-曰
-妾
-泗
-·
-:
-瀘
-風
-Ë
-(
-)
-∶
-紅
-紗
-瑭
-雲
-頭
-鶏
-財
-許
-•
-¥
-樂
-焗
-麗
-—
-;
-滙
-東
-榮
-繪
-興
-…
-門
-業
-π
-楊
-國
-顧
-é
-盤
-寳
-Λ
-龍
-鳳
-島
-誌
-緣
-結
-銭
-萬
-勝
-祎
-璟
-優
-歡
-臨
-時
-購
-=
-★
-藍
-昇
-鐵
-觀
-勅
-農
-聲
-畫
-兿
-術
-發
-劉
-記
-專
-耑
-園
-書
-壴
-種
-Ο
-●
-褀
-號
-銀
-匯
-敟
-锘
-葉
-橪
-廣
-進
-蒄
-鑽
-阝
-祙
-貢
-鍋
-豊
-夬
-喆
-團
-閣
-開
-燁
-賓
-館
-酡
-沔
-順
-+
-硚
-劵
-饸
-陽
-車
-湓
-復
-萊
-氣
-軒
-華
-堃
-迮
-纟
-戶
-馬
-學
-裡
-電
-嶽
-獨
-マ
-シ
-サ
-ジ
-燘
-袪
-環
-❤
-臺
-灣
-専
-賣
-孖
-聖
-攝
-線
-▪
-α
-傢
-俬
-夢
-達
-莊
-喬
-貝
-薩
-劍
-羅
-壓
-棛
-饦
-尃
-璈
-囍
-醫
-G
-I
-A
-#
-N
-鷄
-髙
-嬰
-啓
-約
-隹
-潔
-賴
-藝
-~
-寶
-籣
-麺
-
-嶺
-√
-義
-網
-峩
-長
-∧
-魚
-機
-構
-②
-鳯
-偉
-L
-B
-㙟
-畵
-鴿
-'
-詩
-溝
-嚞
-屌
-藔
-佧
-玥
-蘭
-織
-1
-3
-9
-0
-7
-點
-砭
-鴨
-鋪
-銘
-廳
-弍
-‧
-創
-湯
-坶
-℃
-卩
-骝
-&
-烜
-荘
-當
-潤
-扞
-係
-懷
-碶
-钅
-蚨
-讠
-☆
-叢
-爲
-埗
-涫
-塗
-→
-楽
-現
-鯨
-愛
-瑪
-鈺
-忄
-悶
-藥
-飾
-樓
-視
-孬
-ㆍ
-燚
-苪
-師
-①
-丼
-锽
-│
-韓
-標
-è
-兒
-閏
-匋
-張
-漢
-Ü
-髪
-會
-閑
-檔
-習
-裝
-の
-峯
-菘
-輝
-И
-雞
-釣
-億
-浐
-K
-O
-R
-8
-H
-E
-P
-T
-W
-D
-S
-C
-M
-F
-姌
-饹
-»
-晞
-廰
-ä
-嵯
-鷹
-負
-飲
-絲
-冚
-楗
-澤
-綫
-區
-❋
-←
-質
-靑
-揚
-③
-滬
-統
-産
-協
-﹑
-乸
-畐
-經
-運
-際
-洺
-岽
-為
-粵
-諾
-崋
-豐
-碁
-ɔ
-V
-2
-6
-齋
-誠
-訂
-´
-勑
-雙
-陳
-無
-í
-泩
-媄
-夌
-刂
-i
-c
-t
-o
-r
-a
-嘢
-耄
-燴
-暃
-壽
-媽
-靈
-抻
-體
-唻
-É
-冮
-甹
-鎮
-錦
-ʌ
-蜛
-蠄
-尓
-駕
-戀
-飬
-逹
-倫
-貴
-極
-Я
-Й
-寬
-磚
-嶪
-郎
-職
-|
-間
-n
-d
-剎
-伈
-課
-飛
-橋
-瘊
-№
-譜
-骓
-圗
-滘
-縣
-粿
-咅
-養
-濤
-彳
-®
-%
-Ⅱ
-啰
-㴪
-見
-矞
-薬
-糁
-邨
-鲮
-顔
-罱
-З
-選
-話
-贏
-氪
-俵
-競
-瑩
-繡
-枱
-β
-綉
-á
-獅
-爾
-™
-麵
-戋
-淩
-徳
-個
-劇
-場
-務
-簡
-寵
-h
-實
-膠
-轱
-圖
-築
-嘣
-樹
-㸃
-營
-耵
-孫
-饃
-鄺
-飯
-麯
-遠
-輸
-坫
-孃
-乚
-閃
-鏢
-㎡
-題
-廠
-關
-↑
-爺
-將
-軍
-連
-篦
-覌
-參
-箸
--
-窠
-棽
-寕
-夀
-爰
-歐
-呙
-閥
-頡
-熱
-雎
-垟
-裟
-凬
-勁
-帑
-馕
-夆
-疌
-枼
-馮
-貨
-蒤
-樸
-彧
-旸
-靜
-龢
-暢
-㐱
-鳥
-珺
-鏡
-灡
-爭
-堷
-廚
-Ó
-騰
-診
-┅
-蘇
-褔
-凱
-頂
-豕
-亞
-帥
-嘬
-⊥
-仺
-桖
-複
-饣
-絡
-穂
-顏
-棟
-納
-▏
-濟
-親
-設
-計
-攵
-埌
-烺
-ò
-頤
-燦
-蓮
-撻
-節
-講
-濱
-濃
-娽
-洳
-朿
-燈
-鈴
-護
-膚
-铔
-過
-補
-Z
-U
-5
-4
-坋
-闿
-䖝
-餘
-缐
-铞
-貿
-铪
-桼
-趙
-鍊
-[
-㐂
-垚
-菓
-揸
-捲
-鐘
-滏
-𣇉
-爍
-輪
-燜
-鴻
-鮮
-動
-鹞
-鷗
-丄
-慶
-鉌
-翥
-飮
-腸
-⇋
-漁
-覺
-來
-熘
-昴
-翏
-鲱
-圧
-鄉
-萭
-頔
-爐
-嫚
-г
-貭
-類
-聯
-幛
-輕
-訓
-鑒
-夋
-锨
-芃
-珣
-䝉
-扙
-嵐
-銷
-處
-ㄱ
-語
-誘
-苝
-歸
-儀
-燒
-楿
-內
-粢
-葒
-奧
-麥
-礻
-滿
-蠔
-穵
-瞭
-態
-鱬
-榞
-硂
-鄭
-黃
-煙
-祐
-奓
-逺
-*
-瑄
-獲
-聞
-薦
-讀
-這
-樣
-決
-問
-啟
-們
-執
-説
-轉
-單
-隨
-唘
-帶
-倉
-庫
-還
-贈
-尙
-皺
-■
-餅
-產
-○
-∈
-報
-狀
-楓
-賠
-琯
-嗮
-禮
-`
-傳
->
-≤
-嗞
-Φ
-≥
-換
-咭
-∣
-↓
-曬
-ε
-応
-寫
-″
-終
-様
-純
-費
-療
-聨
-凍
-壐
-郵
-ü
-黒
-∫
-製
-塊
-調
-軽
-確
-撃
-級
-馴
-Ⅲ
-涇
-繹
-數
-碼
-證
-狒
-処
-劑
-<
-晧
-賀
-衆
-]
-櫥
-兩
-陰
-絶
-對
-鯉
-憶
-◎
-p
-e
-Y
-蕒
-煖
-頓
-測
-試
-鼽
-僑
-碩
-妝
-帯
-≈
-鐡
-舖
-權
-喫
-倆
-ˋ
-該
-悅
-ā
-俫
-.
-f
-s
-b
-m
-k
-g
-u
-j
-貼
-淨
-濕
-針
-適
-備
-l
-/
-給
-謢
-強
-觸
-衛
-與
-⊙
-$
-緯
-變
-⑴
-⑵
-⑶
-㎏
-殺
-∩
-幚
-─
-價
-▲
-離
-ú
-ó
-飄
-烏
-関
-閟
-﹝
-﹞
-邏
-輯
-鍵
-驗
-訣
-導
-歷
-屆
-層
-▼
-儱
-錄
-熳
-ē
-艦
-吋
-錶
-辧
-飼
-顯
-④
-禦
-販
-気
-対
-枰
-閩
-紀
-幹
-瞓
-貊
-淚
-△
-眞
-墊
-Ω
-獻
-褲
-縫
-緑
-亜
-鉅
-餠
-{
-}
-◆
-蘆
-薈
-█
-◇
-溫
-彈
-晳
-粧
-犸
-穩
-訊
-崬
-凖
-熥
-П
-舊
-條
-紋
-圍
-Ⅳ
-筆
-尷
-難
-雜
-錯
-綁
-識
-頰
-鎖
-艶
-□
-殁
-殼
-⑧
-├
-▕
-鵬
-ǐ
-ō
-ǒ
-糝
-綱
-▎
-μ
-盜
-饅
-醬
-籤
-蓋
-釀
-鹽
-據
-à
-ɡ
-辦
-◥
-彐
-┌
-婦
-獸
-鲩
-伱
-ī
-蒟
-蒻
-齊
-袆
-腦
-寧
-凈
-妳
-煥
-詢
-偽
-謹
-啫
-鯽
-騷
-鱸
-損
-傷
-鎻
-髮
-買
-冏
-儥
-両
-﹢
-∞
-載
-喰
-z
-羙
-悵
-燙
-曉
-員
-組
-徹
-艷
-痠
-鋼
-鼙
-縮
-細
-嚒
-爯
-≠
-維
-"
-鱻
-壇
-厍
-帰
-浥
-犇
-薡
-軎
-²
-應
-醜
-刪
-緻
-鶴
-賜
-噁
-軌
-尨
-镔
-鷺
-槗
-彌
-葚
-濛
-請
-溇
-緹
-賢
-訪
-獴
-瑅
-資
-縤
-陣
-蕟
-栢
-韻
-祼
-恁
-伢
-謝
-劃
-涑
-總
-衖
-踺
-砋
-凉
-籃
-駿
-苼
-瘋
-昽
-紡
-驊
-腎
-﹗
-響
-杋
-剛
-嚴
-禪
-歓
-槍
-傘
-檸
-檫
-炣
-勢
-鏜
-鎢
-銑
-尐
-減
-奪
-惡
-θ
-僮
-婭
-臘
-ū
-ì
-殻
-鉄
-∑
-蛲
-焼
-緖
-續
-紹
-懮
\ No newline at end of file
diff --git a/godo/deps/extract.go b/godo/deps/extract.go
deleted file mode 100644
index d0f7793..0000000
--- a/godo/deps/extract.go
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * GodoAI - A software focused on localizing AI applications
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package deps
-
-import (
- "archive/zip"
- "bytes"
- "fmt"
- "godo/libs"
- "io"
- "os"
- "path/filepath"
- "strings"
-)
-
-func InitDir() error {
- // Resolve the runtime directory for the AI executables
- runDir := libs.GetAiExeDir()
- if !libs.PathExists(runDir) {
- if err := os.MkdirAll(runDir, 0o755); err != nil {
- return fmt.Errorf("failed to create user directory: %v", err)
- }
- err := ExtractEmbeddedZip(runDir)
- if err != nil {
- return fmt.Errorf("failed to extract embedded zip: %v", err)
- }
-
- }
-
- return nil
-}
-
-// ExtractEmbeddedZip extracts the embedded ZIP archive into the given directory
-func ExtractEmbeddedZip(exeDir string) error {
- // Read the embedded ZIP data through an in-memory buffer
- reader := bytes.NewReader(embeddedZip)
- zipReader, err := zip.NewReader(reader, int64(len(embeddedZip)))
- if err != nil {
- return fmt.Errorf("failed to create zip reader: %v", err)
- }
-
- // Walk every entry in the ZIP archive and extract it
- for _, zipEntry := range zipReader.File {
- // Skip hidden entries whose names start with "."
- if strings.HasPrefix(zipEntry.Name, ".") {
- fmt.Printf("Skipping hidden entry: %s\n", zipEntry.Name)
- continue
- }
-
- // Build the destination path for the extracted file or directory
- entryPath := filepath.Join(exeDir, zipEntry.Name)
-
- // If the entry is a directory, create it
- if zipEntry.FileInfo().IsDir() {
- if err := os.MkdirAll(entryPath, zipEntry.Mode()); err != nil {
- return fmt.Errorf("failed to create directory: %v", err)
- }
- continue
- }
-
- // Otherwise the entry is a file; extract its contents
- zipFile, err := zipEntry.Open()
- if err != nil {
- return fmt.Errorf("failed to open zip file entry: %v", err)
- }
- defer zipFile.Close()
-
- // Ensure the destination file's parent directory exists
- if err := os.MkdirAll(filepath.Dir(entryPath), 0755); err != nil {
- return fmt.Errorf("failed to create parent directory: %v", err)
- }
-
- dstFile, err := os.OpenFile(entryPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
- if err != nil {
- return fmt.Errorf("failed to create destination file: %v", err)
- }
- defer dstFile.Close()
-
- if _, err := io.Copy(dstFile, zipFile); err != nil {
- return fmt.Errorf("failed to copy content to destination file: %v", err)
- }
- }
-
- fmt.Println("Embedded ZIP extracted to", exeDir)
- return nil
-}
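
One detail worth noting about the removed ExtractEmbeddedZip: the defer zipFile.Close() and defer dstFile.Close() calls sit inside the entry loop, so every handle stays open until the whole archive has been processed. A sketch of the usual per-entry structure (not part of this change) closes both handles before moving on:

```go
package deps

import (
	"archive/zip"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// extractEntry writes a single ZIP entry to entryPath and closes both
// handles when this function returns, instead of deferring them until
// the whole archive has been extracted.
func extractEntry(zipEntry *zip.File, entryPath string) error {
	zipFile, err := zipEntry.Open()
	if err != nil {
		return fmt.Errorf("failed to open zip file entry: %v", err)
	}
	defer zipFile.Close()

	// Ensure the destination file's parent directory exists.
	if err := os.MkdirAll(filepath.Dir(entryPath), 0o755); err != nil {
		return fmt.Errorf("failed to create parent directory: %v", err)
	}

	dstFile, err := os.OpenFile(entryPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
	if err != nil {
		return fmt.Errorf("failed to create destination file: %v", err)
	}
	defer dstFile.Close()

	if _, err := io.Copy(dstFile, zipFile); err != nil {
		return fmt.Errorf("failed to copy content to destination file: %v", err)
	}
	return nil
}
```
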
diff --git a/godo/deps/linux.go b/godo/deps/linux.go
deleted file mode 100644
index 7a581ca..0000000
--- a/godo/deps/linux.go
+++ /dev/null
@@ -1,27 +0,0 @@
-//go:build linux
-
-/*
- * GodoAI - A software focused on localizing AI applications
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package deps
-
-import (
- _ "embed"
-)
-
-//go:embed linux.zip
-var embeddedZip []byte
diff --git a/godo/deps/linux/goconv/pdf/pdfimages b/godo/deps/linux/goconv/pdf/pdfimages
deleted file mode 100644
index 95485bd..0000000
Binary files a/godo/deps/linux/goconv/pdf/pdfimages and /dev/null differ
diff --git a/godo/deps/linux/goconv/pdf/pdftohtml b/godo/deps/linux/goconv/pdf/pdftohtml
deleted file mode 100644
index fd265ee..0000000
Binary files a/godo/deps/linux/goconv/pdf/pdftohtml and /dev/null differ
diff --git a/godo/deps/linux/goconv/pdf/pdftopng b/godo/deps/linux/goconv/pdf/pdftopng
deleted file mode 100644
index d83f9ad..0000000
Binary files a/godo/deps/linux/goconv/pdf/pdftopng and /dev/null differ
diff --git a/godo/deps/linux/goconv/rapid/RapidOcrOnnx b/godo/deps/linux/goconv/rapid/RapidOcrOnnx
deleted file mode 100644
index 7b03a86..0000000
Binary files a/godo/deps/linux/goconv/rapid/RapidOcrOnnx and /dev/null differ
diff --git a/godo/deps/linux/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx b/godo/deps/linux/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx
deleted file mode 100644
index be54729..0000000
Binary files a/godo/deps/linux/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx and /dev/null differ
diff --git a/godo/deps/linux/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx b/godo/deps/linux/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx
deleted file mode 100644
index 9522c96..0000000
Binary files a/godo/deps/linux/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx and /dev/null differ
diff --git a/godo/deps/linux/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx b/godo/deps/linux/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx
deleted file mode 100644
index 6b9d102..0000000
Binary files a/godo/deps/linux/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx and /dev/null differ
diff --git a/godo/deps/linux/goconv/rapid/models/ppocr_keys_v1.txt b/godo/deps/linux/goconv/rapid/models/ppocr_keys_v1.txt
deleted file mode 100644
index 84b885d..0000000
--- a/godo/deps/linux/goconv/rapid/models/ppocr_keys_v1.txt
+++ /dev/null
@@ -1,6623 +0,0 @@
-'
-疗
-绚
-诚
-娇
-溜
-题
-贿
-者
-廖
-更
-纳
-加
-奉
-公
-一
-就
-汴
-计
-与
-路
-房
-原
-妇
-2
-0
-8
--
-7
-其
->
-:
-]
-,
-,
-骑
-刈
-全
-消
-昏
-傈
-安
-久
-钟
-嗅
-不
-影
-处
-驽
-蜿
-资
-关
-椤
-地
-瘸
-专
-问
-忖
-票
-嫉
-炎
-韵
-要
-月
-田
-节
-陂
-鄙
-捌
-备
-拳
-伺
-眼
-网
-盎
-大
-傍
-心
-东
-愉
-汇
-蹿
-科
-每
-业
-里
-航
-晏
-字
-平
-录
-先
-1
-3
-彤
-鲶
-产
-稍
-督
-腴
-有
-象
-岳
-注
-绍
-在
-泺
-文
-定
-核
-名
-水
-过
-理
-让
-偷
-率
-等
-这
-发
-”
-为
-含
-肥
-酉
-相
-鄱
-七
-编
-猥
-锛
-日
-镀
-蒂
-掰
-倒
-辆
-栾
-栗
-综
-涩
-州
-雌
-滑
-馀
-了
-机
-块
-司
-宰
-甙
-兴
-矽
-抚
-保
-用
-沧
-秩
-如
-收
-息
-滥
-页
-疑
-埠
-!
-!
-姥
-异
-橹
-钇
-向
-下
-跄
-的
-椴
-沫
-国
-绥
-獠
-报
-开
-民
-蜇
-何
-分
-凇
-长
-讥
-藏
-掏
-施
-羽
-中
-讲
-派
-嘟
-人
-提
-浼
-间
-世
-而
-古
-多
-倪
-唇
-饯
-控
-庚
-首
-赛
-蜓
-味
-断
-制
-觉
-技
-替
-艰
-溢
-潮
-夕
-钺
-外
-摘
-枋
-动
-双
-单
-啮
-户
-枇
-确
-锦
-曜
-杜
-或
-能
-效
-霜
-盒
-然
-侗
-电
-晁
-放
-步
-鹃
-新
-杖
-蜂
-吒
-濂
-瞬
-评
-总
-隍
-对
-独
-合
-也
-是
-府
-青
-天
-诲
-墙
-组
-滴
-级
-邀
-帘
-示
-已
-时
-骸
-仄
-泅
-和
-遨
-店
-雇
-疫
-持
-巍
-踮
-境
-只
-亨
-目
-鉴
-崤
-闲
-体
-泄
-杂
-作
-般
-轰
-化
-解
-迂
-诿
-蛭
-璀
-腾
-告
-版
-服
-省
-师
-小
-规
-程
-线
-海
-办
-引
-二
-桧
-牌
-砺
-洄
-裴
-修
-图
-痫
-胡
-许
-犊
-事
-郛
-基
-柴
-呼
-食
-研
-奶
-律
-蛋
-因
-葆
-察
-戏
-褒
-戒
-再
-李
-骁
-工
-貂
-油
-鹅
-章
-啄
-休
-场
-给
-睡
-纷
-豆
-器
-捎
-说
-敏
-学
-会
-浒
-设
-诊
-格
-廓
-查
-来
-霓
-室
-溆
-¢
-诡
-寥
-焕
-舜
-柒
-狐
-回
-戟
-砾
-厄
-实
-翩
-尿
-五
-入
-径
-惭
-喹
-股
-宇
-篝
-|
-;
-美
-期
-云
-九
-祺
-扮
-靠
-锝
-槌
-系
-企
-酰
-阊
-暂
-蚕
-忻
-豁
-本
-羹
-执
-条
-钦
-H
-獒
-限
-进
-季
-楦
-于
-芘
-玖
-铋
-茯
-未
-答
-粘
-括
-样
-精
-欠
-矢
-甥
-帷
-嵩
-扣
-令
-仔
-风
-皈
-行
-支
-部
-蓉
-刮
-站
-蜡
-救
-钊
-汗
-松
-嫌
-成
-可
-.
-鹤
-院
-从
-交
-政
-怕
-活
-调
-球
-局
-验
-髌
-第
-韫
-谗
-串
-到
-圆
-年
-米
-/
-*
-友
-忿
-检
-区
-看
-自
-敢
-刃
-个
-兹
-弄
-流
-留
-同
-没
-齿
-星
-聆
-轼
-湖
-什
-三
-建
-蛔
-儿
-椋
-汕
-震
-颧
-鲤
-跟
-力
-情
-璺
-铨
-陪
-务
-指
-族
-训
-滦
-鄣
-濮
-扒
-商
-箱
-十
-召
-慷
-辗
-所
-莞
-管
-护
-臭
-横
-硒
-嗓
-接
-侦
-六
-露
-党
-馋
-驾
-剖
-高
-侬
-妪
-幂
-猗
-绺
-骐
-央
-酐
-孝
-筝
-课
-徇
-缰
-门
-男
-西
-项
-句
-谙
-瞒
-秃
-篇
-教
-碲
-罚
-声
-呐
-景
-前
-富
-嘴
-鳌
-稀
-免
-朋
-啬
-睐
-去
-赈
-鱼
-住
-肩
-愕
-速
-旁
-波
-厅
-健
-茼
-厥
-鲟
-谅
-投
-攸
-炔
-数
-方
-击
-呋
-谈
-绩
-别
-愫
-僚
-躬
-鹧
-胪
-炳
-招
-喇
-膨
-泵
-蹦
-毛
-结
-5
-4
-谱
-识
-陕
-粽
-婚
-拟
-构
-且
-搜
-任
-潘
-比
-郢
-妨
-醪
-陀
-桔
-碘
-扎
-选
-哈
-骷
-楷
-亿
-明
-缆
-脯
-监
-睫
-逻
-婵
-共
-赴
-淝
-凡
-惦
-及
-达
-揖
-谩
-澹
-减
-焰
-蛹
-番
-祁
-柏
-员
-禄
-怡
-峤
-龙
-白
-叽
-生
-闯
-起
-细
-装
-谕
-竟
-聚
-钙
-上
-导
-渊
-按
-艾
-辘
-挡
-耒
-盹
-饪
-臀
-记
-邮
-蕙
-受
-各
-医
-搂
-普
-滇
-朗
-茸
-带
-翻
-酚
-(
-光
-堤
-墟
-蔷
-万
-幻
-〓
-瑙
-辈
-昧
-盏
-亘
-蛀
-吉
-铰
-请
-子
-假
-闻
-税
-井
-诩
-哨
-嫂
-好
-面
-琐
-校
-馊
-鬣
-缂
-营
-访
-炖
-占
-农
-缀
-否
-经
-钚
-棵
-趟
-张
-亟
-吏
-茶
-谨
-捻
-论
-迸
-堂
-玉
-信
-吧
-瞠
-乡
-姬
-寺
-咬
-溏
-苄
-皿
-意
-赉
-宝
-尔
-钰
-艺
-特
-唳
-踉
-都
-荣
-倚
-登
-荐
-丧
-奇
-涵
-批
-炭
-近
-符
-傩
-感
-道
-着
-菊
-虹
-仲
-众
-懈
-濯
-颞
-眺
-南
-释
-北
-缝
-标
-既
-茗
-整
-撼
-迤
-贲
-挎
-耱
-拒
-某
-妍
-卫
-哇
-英
-矶
-藩
-治
-他
-元
-领
-膜
-遮
-穗
-蛾
-飞
-荒
-棺
-劫
-么
-市
-火
-温
-拈
-棚
-洼
-转
-果
-奕
-卸
-迪
-伸
-泳
-斗
-邡
-侄
-涨
-屯
-萋
-胭
-氡
-崮
-枞
-惧
-冒
-彩
-斜
-手
-豚
-随
-旭
-淑
-妞
-形
-菌
-吲
-沱
-争
-驯
-歹
-挟
-兆
-柱
-传
-至
-包
-内
-响
-临
-红
-功
-弩
-衡
-寂
-禁
-老
-棍
-耆
-渍
-织
-害
-氵
-渑
-布
-载
-靥
-嗬
-虽
-苹
-咨
-娄
-库
-雉
-榜
-帜
-嘲
-套
-瑚
-亲
-簸
-欧
-边
-6
-腿
-旮
-抛
-吹
-瞳
-得
-镓
-梗
-厨
-继
-漾
-愣
-憨
-士
-策
-窑
-抑
-躯
-襟
-脏
-参
-贸
-言
-干
-绸
-鳄
-穷
-藜
-音
-折
-详
-)
-举
-悍
-甸
-癌
-黎
-谴
-死
-罩
-迁
-寒
-驷
-袖
-媒
-蒋
-掘
-模
-纠
-恣
-观
-祖
-蛆
-碍
-位
-稿
-主
-澧
-跌
-筏
-京
-锏
-帝
-贴
-证
-糠
-才
-黄
-鲸
-略
-炯
-饱
-四
-出
-园
-犀
-牧
-容
-汉
-杆
-浈
-汰
-瑷
-造
-虫
-瘩
-怪
-驴
-济
-应
-花
-沣
-谔
-夙
-旅
-价
-矿
-以
-考
-s
-u
-呦
-晒
-巡
-茅
-准
-肟
-瓴
-詹
-仟
-褂
-译
-桌
-混
-宁
-怦
-郑
-抿
-些
-余
-鄂
-饴
-攒
-珑
-群
-阖
-岔
-琨
-藓
-预
-环
-洮
-岌
-宀
-杲
-瀵
-最
-常
-囡
-周
-踊
-女
-鼓
-袭
-喉
-简
-范
-薯
-遐
-疏
-粱
-黜
-禧
-法
-箔
-斤
-遥
-汝
-奥
-直
-贞
-撑
-置
-绱
-集
-她
-馅
-逗
-钧
-橱
-魉
-[
-恙
-躁
-唤
-9
-旺
-膘
-待
-脾
-惫
-购
-吗
-依
-盲
-度
-瘿
-蠖
-俾
-之
-镗
-拇
-鲵
-厝
-簧
-续
-款
-展
-啃
-表
-剔
-品
-钻
-腭
-损
-清
-锶
-统
-涌
-寸
-滨
-贪
-链
-吠
-冈
-伎
-迥
-咏
-吁
-览
-防
-迅
-失
-汾
-阔
-逵
-绀
-蔑
-列
-川
-凭
-努
-熨
-揪
-利
-俱
-绉
-抢
-鸨
-我
-即
-责
-膦
-易
-毓
-鹊
-刹
-玷
-岿
-空
-嘞
-绊
-排
-术
-估
-锷
-违
-们
-苟
-铜
-播
-肘
-件
-烫
-审
-鲂
-广
-像
-铌
-惰
-铟
-巳
-胍
-鲍
-康
-憧
-色
-恢
-想
-拷
-尤
-疳
-知
-S
-Y
-F
-D
-A
-峄
-裕
-帮
-握
-搔
-氐
-氘
-难
-墒
-沮
-雨
-叁
-缥
-悴
-藐
-湫
-娟
-苑
-稠
-颛
-簇
-后
-阕
-闭
-蕤
-缚
-怎
-佞
-码
-嘤
-蔡
-痊
-舱
-螯
-帕
-赫
-昵
-升
-烬
-岫
-、
-疵
-蜻
-髁
-蕨
-隶
-烛
-械
-丑
-盂
-梁
-强
-鲛
-由
-拘
-揉
-劭
-龟
-撤
-钩
-呕
-孛
-费
-妻
-漂
-求
-阑
-崖
-秤
-甘
-通
-深
-补
-赃
-坎
-床
-啪
-承
-吼
-量
-暇
-钼
-烨
-阂
-擎
-脱
-逮
-称
-P
-神
-属
-矗
-华
-届
-狍
-葑
-汹
-育
-患
-窒
-蛰
-佼
-静
-槎
-运
-鳗
-庆
-逝
-曼
-疱
-克
-代
-官
-此
-麸
-耧
-蚌
-晟
-例
-础
-榛
-副
-测
-唰
-缢
-迹
-灬
-霁
-身
-岁
-赭
-扛
-又
-菡
-乜
-雾
-板
-读
-陷
-徉
-贯
-郁
-虑
-变
-钓
-菜
-圾
-现
-琢
-式
-乐
-维
-渔
-浜
-左
-吾
-脑
-钡
-警
-T
-啵
-拴
-偌
-漱
-湿
-硕
-止
-骼
-魄
-积
-燥
-联
-踢
-玛
-则
-窿
-见
-振
-畿
-送
-班
-钽
-您
-赵
-刨
-印
-讨
-踝
-籍
-谡
-舌
-崧
-汽
-蔽
-沪
-酥
-绒
-怖
-财
-帖
-肱
-私
-莎
-勋
-羔
-霸
-励
-哼
-帐
-将
-帅
-渠
-纪
-婴
-娩
-岭
-厘
-滕
-吻
-伤
-坝
-冠
-戊
-隆
-瘁
-介
-涧
-物
-黍
-并
-姗
-奢
-蹑
-掣
-垸
-锴
-命
-箍
-捉
-病
-辖
-琰
-眭
-迩
-艘
-绌
-繁
-寅
-若
-毋
-思
-诉
-类
-诈
-燮
-轲
-酮
-狂
-重
-反
-职
-筱
-县
-委
-磕
-绣
-奖
-晋
-濉
-志
-徽
-肠
-呈
-獐
-坻
-口
-片
-碰
-几
-村
-柿
-劳
-料
-获
-亩
-惕
-晕
-厌
-号
-罢
-池
-正
-鏖
-煨
-家
-棕
-复
-尝
-懋
-蜥
-锅
-岛
-扰
-队
-坠
-瘾
-钬
-@
-卧
-疣
-镇
-譬
-冰
-彷
-频
-黯
-据
-垄
-采
-八
-缪
-瘫
-型
-熹
-砰
-楠
-襁
-箐
-但
-嘶
-绳
-啤
-拍
-盥
-穆
-傲
-洗
-盯
-塘
-怔
-筛
-丿
-台
-恒
-喂
-葛
-永
-¥
-烟
-酒
-桦
-书
-砂
-蚝
-缉
-态
-瀚
-袄
-圳
-轻
-蛛
-超
-榧
-遛
-姒
-奘
-铮
-右
-荽
-望
-偻
-卡
-丶
-氰
-附
-做
-革
-索
-戚
-坨
-桷
-唁
-垅
-榻
-岐
-偎
-坛
-莨
-山
-殊
-微
-骇
-陈
-爨
-推
-嗝
-驹
-澡
-藁
-呤
-卤
-嘻
-糅
-逛
-侵
-郓
-酌
-德
-摇
-※
-鬃
-被
-慨
-殡
-羸
-昌
-泡
-戛
-鞋
-河
-宪
-沿
-玲
-鲨
-翅
-哽
-源
-铅
-语
-照
-邯
-址
-荃
-佬
-顺
-鸳
-町
-霭
-睾
-瓢
-夸
-椁
-晓
-酿
-痈
-咔
-侏
-券
-噎
-湍
-签
-嚷
-离
-午
-尚
-社
-锤
-背
-孟
-使
-浪
-缦
-潍
-鞅
-军
-姹
-驶
-笑
-鳟
-鲁
-》
-孽
-钜
-绿
-洱
-礴
-焯
-椰
-颖
-囔
-乌
-孔
-巴
-互
-性
-椽
-哞
-聘
-昨
-早
-暮
-胶
-炀
-隧
-低
-彗
-昝
-铁
-呓
-氽
-藉
-喔
-癖
-瑗
-姨
-权
-胱
-韦
-堑
-蜜
-酋
-楝
-砝
-毁
-靓
-歙
-锲
-究
-屋
-喳
-骨
-辨
-碑
-武
-鸠
-宫
-辜
-烊
-适
-坡
-殃
-培
-佩
-供
-走
-蜈
-迟
-翼
-况
-姣
-凛
-浔
-吃
-飘
-债
-犟
-金
-促
-苛
-崇
-坂
-莳
-畔
-绂
-兵
-蠕
-斋
-根
-砍
-亢
-欢
-恬
-崔
-剁
-餐
-榫
-快
-扶
-‖
-濒
-缠
-鳜
-当
-彭
-驭
-浦
-篮
-昀
-锆
-秸
-钳
-弋
-娣
-瞑
-夷
-龛
-苫
-拱
-致
-%
-嵊
-障
-隐
-弑
-初
-娓
-抉
-汩
-累
-蓖
-"
-唬
-助
-苓
-昙
-押
-毙
-破
-城
-郧
-逢
-嚏
-獭
-瞻
-溱
-婿
-赊
-跨
-恼
-璧
-萃
-姻
-貉
-灵
-炉
-密
-氛
-陶
-砸
-谬
-衔
-点
-琛
-沛
-枳
-层
-岱
-诺
-脍
-榈
-埂
-征
-冷
-裁
-打
-蹴
-素
-瘘
-逞
-蛐
-聊
-激
-腱
-萘
-踵
-飒
-蓟
-吆
-取
-咙
-簋
-涓
-矩
-曝
-挺
-揣
-座
-你
-史
-舵
-焱
-尘
-苏
-笈
-脚
-溉
-榨
-诵
-樊
-邓
-焊
-义
-庶
-儋
-蟋
-蒲
-赦
-呷
-杞
-诠
-豪
-还
-试
-颓
-茉
-太
-除
-紫
-逃
-痴
-草
-充
-鳕
-珉
-祗
-墨
-渭
-烩
-蘸
-慕
-璇
-镶
-穴
-嵘
-恶
-骂
-险
-绋
-幕
-碉
-肺
-戳
-刘
-潞
-秣
-纾
-潜
-銮
-洛
-须
-罘
-销
-瘪
-汞
-兮
-屉
-r
-林
-厕
-质
-探
-划
-狸
-殚
-善
-煊
-烹
-〒
-锈
-逯
-宸
-辍
-泱
-柚
-袍
-远
-蹋
-嶙
-绝
-峥
-娥
-缍
-雀
-徵
-认
-镱
-谷
-=
-贩
-勉
-撩
-鄯
-斐
-洋
-非
-祚
-泾
-诒
-饿
-撬
-威
-晷
-搭
-芍
-锥
-笺
-蓦
-候
-琊
-档
-礁
-沼
-卵
-荠
-忑
-朝
-凹
-瑞
-头
-仪
-弧
-孵
-畏
-铆
-突
-衲
-车
-浩
-气
-茂
-悖
-厢
-枕
-酝
-戴
-湾
-邹
-飚
-攘
-锂
-写
-宵
-翁
-岷
-无
-喜
-丈
-挑
-嗟
-绛
-殉
-议
-槽
-具
-醇
-淞
-笃
-郴
-阅
-饼
-底
-壕
-砚
-弈
-询
-缕
-庹
-翟
-零
-筷
-暨
-舟
-闺
-甯
-撞
-麂
-茌
-蔼
-很
-珲
-捕
-棠
-角
-阉
-媛
-娲
-诽
-剿
-尉
-爵
-睬
-韩
-诰
-匣
-危
-糍
-镯
-立
-浏
-阳
-少
-盆
-舔
-擘
-匪
-申
-尬
-铣
-旯
-抖
-赘
-瓯
-居
-ˇ
-哮
-游
-锭
-茏
-歌
-坏
-甚
-秒
-舞
-沙
-仗
-劲
-潺
-阿
-燧
-郭
-嗖
-霏
-忠
-材
-奂
-耐
-跺
-砀
-输
-岖
-媳
-氟
-极
-摆
-灿
-今
-扔
-腻
-枝
-奎
-药
-熄
-吨
-话
-q
-额
-慑
-嘌
-协
-喀
-壳
-埭
-视
-著
-於
-愧
-陲
-翌
-峁
-颅
-佛
-腹
-聋
-侯
-咎
-叟
-秀
-颇
-存
-较
-罪
-哄
-岗
-扫
-栏
-钾
-羌
-己
-璨
-枭
-霉
-煌
-涸
-衿
-键
-镝
-益
-岢
-奏
-连
-夯
-睿
-冥
-均
-糖
-狞
-蹊
-稻
-爸
-刿
-胥
-煜
-丽
-肿
-璃
-掸
-跚
-灾
-垂
-樾
-濑
-乎
-莲
-窄
-犹
-撮
-战
-馄
-软
-络
-显
-鸢
-胸
-宾
-妲
-恕
-埔
-蝌
-份
-遇
-巧
-瞟
-粒
-恰
-剥
-桡
-博
-讯
-凯
-堇
-阶
-滤
-卖
-斌
-骚
-彬
-兑
-磺
-樱
-舷
-两
-娱
-福
-仃
-差
-找
-桁
-÷
-净
-把
-阴
-污
-戬
-雷
-碓
-蕲
-楚
-罡
-焖
-抽
-妫
-咒
-仑
-闱
-尽
-邑
-菁
-爱
-贷
-沥
-鞑
-牡
-嗉
-崴
-骤
-塌
-嗦
-订
-拮
-滓
-捡
-锻
-次
-坪
-杩
-臃
-箬
-融
-珂
-鹗
-宗
-枚
-降
-鸬
-妯
-阄
-堰
-盐
-毅
-必
-杨
-崃
-俺
-甬
-状
-莘
-货
-耸
-菱
-腼
-铸
-唏
-痤
-孚
-澳
-懒
-溅
-翘
-疙
-杷
-淼
-缙
-骰
-喊
-悉
-砻
-坷
-艇
-赁
-界
-谤
-纣
-宴
-晃
-茹
-归
-饭
-梢
-铡
-街
-抄
-肼
-鬟
-苯
-颂
-撷
-戈
-炒
-咆
-茭
-瘙
-负
-仰
-客
-琉
-铢
-封
-卑
-珥
-椿
-镧
-窨
-鬲
-寿
-御
-袤
-铃
-萎
-砖
-餮
-脒
-裳
-肪
-孕
-嫣
-馗
-嵇
-恳
-氯
-江
-石
-褶
-冢
-祸
-阻
-狈
-羞
-银
-靳
-透
-咳
-叼
-敷
-芷
-啥
-它
-瓤
-兰
-痘
-懊
-逑
-肌
-往
-捺
-坊
-甩
-呻
-〃
-沦
-忘
-膻
-祟
-菅
-剧
-崆
-智
-坯
-臧
-霍
-墅
-攻
-眯
-倘
-拢
-骠
-铐
-庭
-岙
-瓠
-′
-缺
-泥
-迢
-捶
-?
-?
-郏
-喙
-掷
-沌
-纯
-秘
-种
-听
-绘
-固
-螨
-团
-香
-盗
-妒
-埚
-蓝
-拖
-旱
-荞
-铀
-血
-遏
-汲
-辰
-叩
-拽
-幅
-硬
-惶
-桀
-漠
-措
-泼
-唑
-齐
-肾
-念
-酱
-虚
-屁
-耶
-旗
-砦
-闵
-婉
-馆
-拭
-绅
-韧
-忏
-窝
-醋
-葺
-顾
-辞
-倜
-堆
-辋
-逆
-玟
-贱
-疾
-董
-惘
-倌
-锕
-淘
-嘀
-莽
-俭
-笏
-绑
-鲷
-杈
-择
-蟀
-粥
-嗯
-驰
-逾
-案
-谪
-褓
-胫
-哩
-昕
-颚
-鲢
-绠
-躺
-鹄
-崂
-儒
-俨
-丝
-尕
-泌
-啊
-萸
-彰
-幺
-吟
-骄
-苣
-弦
-脊
-瑰
-〈
-诛
-镁
-析
-闪
-剪
-侧
-哟
-框
-螃
-守
-嬗
-燕
-狭
-铈
-缮
-概
-迳
-痧
-鲲
-俯
-售
-笼
-痣
-扉
-挖
-满
-咋
-援
-邱
-扇
-歪
-便
-玑
-绦
-峡
-蛇
-叨
-〖
-泽
-胃
-斓
-喋
-怂
-坟
-猪
-该
-蚬
-炕
-弥
-赞
-棣
-晔
-娠
-挲
-狡
-创
-疖
-铕
-镭
-稷
-挫
-弭
-啾
-翔
-粉
-履
-苘
-哦
-楼
-秕
-铂
-土
-锣
-瘟
-挣
-栉
-习
-享
-桢
-袅
-磨
-桂
-谦
-延
-坚
-蔚
-噗
-署
-谟
-猬
-钎
-恐
-嬉
-雒
-倦
-衅
-亏
-璩
-睹
-刻
-殿
-王
-算
-雕
-麻
-丘
-柯
-骆
-丸
-塍
-谚
-添
-鲈
-垓
-桎
-蚯
-芥
-予
-飕
-镦
-谌
-窗
-醚
-菀
-亮
-搪
-莺
-蒿
-羁
-足
-J
-真
-轶
-悬
-衷
-靛
-翊
-掩
-哒
-炅
-掐
-冼
-妮
-l
-谐
-稚
-荆
-擒
-犯
-陵
-虏
-浓
-崽
-刍
-陌
-傻
-孜
-千
-靖
-演
-矜
-钕
-煽
-杰
-酗
-渗
-伞
-栋
-俗
-泫
-戍
-罕
-沾
-疽
-灏
-煦
-芬
-磴
-叱
-阱
-榉
-湃
-蜀
-叉
-醒
-彪
-租
-郡
-篷
-屎
-良
-垢
-隗
-弱
-陨
-峪
-砷
-掴
-颁
-胎
-雯
-绵
-贬
-沐
-撵
-隘
-篙
-暖
-曹
-陡
-栓
-填
-臼
-彦
-瓶
-琪
-潼
-哪
-鸡
-摩
-啦
-俟
-锋
-域
-耻
-蔫
-疯
-纹
-撇
-毒
-绶
-痛
-酯
-忍
-爪
-赳
-歆
-嘹
-辕
-烈
-册
-朴
-钱
-吮
-毯
-癜
-娃
-谀
-邵
-厮
-炽
-璞
-邃
-丐
-追
-词
-瓒
-忆
-轧
-芫
-谯
-喷
-弟
-半
-冕
-裙
-掖
-墉
-绮
-寝
-苔
-势
-顷
-褥
-切
-衮
-君
-佳
-嫒
-蚩
-霞
-佚
-洙
-逊
-镖
-暹
-唛
-&
-殒
-顶
-碗
-獗
-轭
-铺
-蛊
-废
-恹
-汨
-崩
-珍
-那
-杵
-曲
-纺
-夏
-薰
-傀
-闳
-淬
-姘
-舀
-拧
-卷
-楂
-恍
-讪
-厩
-寮
-篪
-赓
-乘
-灭
-盅
-鞣
-沟
-慎
-挂
-饺
-鼾
-杳
-树
-缨
-丛
-絮
-娌
-臻
-嗳
-篡
-侩
-述
-衰
-矛
-圈
-蚜
-匕
-筹
-匿
-濞
-晨
-叶
-骋
-郝
-挚
-蚴
-滞
-增
-侍
-描
-瓣
-吖
-嫦
-蟒
-匾
-圣
-赌
-毡
-癞
-恺
-百
-曳
-需
-篓
-肮
-庖
-帏
-卿
-驿
-遗
-蹬
-鬓
-骡
-歉
-芎
-胳
-屐
-禽
-烦
-晌
-寄
-媾
-狄
-翡
-苒
-船
-廉
-终
-痞
-殇
-々
-畦
-饶
-改
-拆
-悻
-萄
-£
-瓿
-乃
-訾
-桅
-匮
-溧
-拥
-纱
-铍
-骗
-蕃
-龋
-缬
-父
-佐
-疚
-栎
-醍
-掳
-蓄
-x
-惆
-颜
-鲆
-榆
-〔
-猎
-敌
-暴
-谥
-鲫
-贾
-罗
-玻
-缄
-扦
-芪
-癣
-落
-徒
-臾
-恿
-猩
-托
-邴
-肄
-牵
-春
-陛
-耀
-刊
-拓
-蓓
-邳
-堕
-寇
-枉
-淌
-啡
-湄
-兽
-酷
-萼
-碚
-濠
-萤
-夹
-旬
-戮
-梭
-琥
-椭
-昔
-勺
-蜊
-绐
-晚
-孺
-僵
-宣
-摄
-冽
-旨
-萌
-忙
-蚤
-眉
-噼
-蟑
-付
-契
-瓜
-悼
-颡
-壁
-曾
-窕
-颢
-澎
-仿
-俑
-浑
-嵌
-浣
-乍
-碌
-褪
-乱
-蔟
-隙
-玩
-剐
-葫
-箫
-纲
-围
-伐
-决
-伙
-漩
-瑟
-刑
-肓
-镳
-缓
-蹭
-氨
-皓
-典
-畲
-坍
-铑
-檐
-塑
-洞
-倬
-储
-胴
-淳
-戾
-吐
-灼
-惺
-妙
-毕
-珐
-缈
-虱
-盖
-羰
-鸿
-磅
-谓
-髅
-娴
-苴
-唷
-蚣
-霹
-抨
-贤
-唠
-犬
-誓
-逍
-庠
-逼
-麓
-籼
-釉
-呜
-碧
-秧
-氩
-摔
-霄
-穸
-纨
-辟
-妈
-映
-完
-牛
-缴
-嗷
-炊
-恩
-荔
-茆
-掉
-紊
-慌
-莓
-羟
-阙
-萁
-磐
-另
-蕹
-辱
-鳐
-湮
-吡
-吩
-唐
-睦
-垠
-舒
-圜
-冗
-瞿
-溺
-芾
-囱
-匠
-僳
-汐
-菩
-饬
-漓
-黑
-霰
-浸
-濡
-窥
-毂
-蒡
-兢
-驻
-鹉
-芮
-诙
-迫
-雳
-厂
-忐
-臆
-猴
-鸣
-蚪
-栈
-箕
-羡
-渐
-莆
-捍
-眈
-哓
-趴
-蹼
-埕
-嚣
-骛
-宏
-淄
-斑
-噜
-严
-瑛
-垃
-椎
-诱
-压
-庾
-绞
-焘
-廿
-抡
-迄
-棘
-夫
-纬
-锹
-眨
-瞌
-侠
-脐
-竞
-瀑
-孳
-骧
-遁
-姜
-颦
-荪
-滚
-萦
-伪
-逸
-粳
-爬
-锁
-矣
-役
-趣
-洒
-颔
-诏
-逐
-奸
-甭
-惠
-攀
-蹄
-泛
-尼
-拼
-阮
-鹰
-亚
-颈
-惑
-勒
-〉
-际
-肛
-爷
-刚
-钨
-丰
-养
-冶
-鲽
-辉
-蔻
-画
-覆
-皴
-妊
-麦
-返
-醉
-皂
-擀
-〗
-酶
-凑
-粹
-悟
-诀
-硖
-港
-卜
-z
-杀
-涕
-±
-舍
-铠
-抵
-弛
-段
-敝
-镐
-奠
-拂
-轴
-跛
-袱
-e
-t
-沉
-菇
-俎
-薪
-峦
-秭
-蟹
-历
-盟
-菠
-寡
-液
-肢
-喻
-染
-裱
-悱
-抱
-氙
-赤
-捅
-猛
-跑
-氮
-谣
-仁
-尺
-辊
-窍
-烙
-衍
-架
-擦
-倏
-璐
-瑁
-币
-楞
-胖
-夔
-趸
-邛
-惴
-饕
-虔
-蝎
-§
-哉
-贝
-宽
-辫
-炮
-扩
-饲
-籽
-魏
-菟
-锰
-伍
-猝
-末
-琳
-哚
-蛎
-邂
-呀
-姿
-鄞
-却
-歧
-仙
-恸
-椐
-森
-牒
-寤
-袒
-婆
-虢
-雅
-钉
-朵
-贼
-欲
-苞
-寰
-故
-龚
-坭
-嘘
-咫
-礼
-硷
-兀
-睢
-汶
-’
-铲
-烧
-绕
-诃
-浃
-钿
-哺
-柜
-讼
-颊
-璁
-腔
-洽
-咐
-脲
-簌
-筠
-镣
-玮
-鞠
-谁
-兼
-姆
-挥
-梯
-蝴
-谘
-漕
-刷
-躏
-宦
-弼
-b
-垌
-劈
-麟
-莉
-揭
-笙
-渎
-仕
-嗤
-仓
-配
-怏
-抬
-错
-泯
-镊
-孰
-猿
-邪
-仍
-秋
-鼬
-壹
-歇
-吵
-炼
-<
-尧
-射
-柬
-廷
-胧
-霾
-凳
-隋
-肚
-浮
-梦
-祥
-株
-堵
-退
-L
-鹫
-跎
-凶
-毽
-荟
-炫
-栩
-玳
-甜
-沂
-鹿
-顽
-伯
-爹
-赔
-蛴
-徐
-匡
-欣
-狰
-缸
-雹
-蟆
-疤
-默
-沤
-啜
-痂
-衣
-禅
-w
-i
-h
-辽
-葳
-黝
-钗
-停
-沽
-棒
-馨
-颌
-肉
-吴
-硫
-悯
-劾
-娈
-马
-啧
-吊
-悌
-镑
-峭
-帆
-瀣
-涉
-咸
-疸
-滋
-泣
-翦
-拙
-癸
-钥
-蜒
-+
-尾
-庄
-凝
-泉
-婢
-渴
-谊
-乞
-陆
-锉
-糊
-鸦
-淮
-I
-B
-N
-晦
-弗
-乔
-庥
-葡
-尻
-席
-橡
-傣
-渣
-拿
-惩
-麋
-斛
-缃
-矮
-蛏
-岘
-鸽
-姐
-膏
-催
-奔
-镒
-喱
-蠡
-摧
-钯
-胤
-柠
-拐
-璋
-鸥
-卢
-荡
-倾
-^
-_
-珀
-逄
-萧
-塾
-掇
-贮
-笆
-聂
-圃
-冲
-嵬
-M
-滔
-笕
-值
-炙
-偶
-蜱
-搐
-梆
-汪
-蔬
-腑
-鸯
-蹇
-敞
-绯
-仨
-祯
-谆
-梧
-糗
-鑫
-啸
-豺
-囹
-猾
-巢
-柄
-瀛
-筑
-踌
-沭
-暗
-苁
-鱿
-蹉
-脂
-蘖
-牢
-热
-木
-吸
-溃
-宠
-序
-泞
-偿
-拜
-檩
-厚
-朐
-毗
-螳
-吞
-媚
-朽
-担
-蝗
-橘
-畴
-祈
-糟
-盱
-隼
-郜
-惜
-珠
-裨
-铵
-焙
-琚
-唯
-咚
-噪
-骊
-丫
-滢
-勤
-棉
-呸
-咣
-淀
-隔
-蕾
-窈
-饨
-挨
-煅
-短
-匙
-粕
-镜
-赣
-撕
-墩
-酬
-馁
-豌
-颐
-抗
-酣
-氓
-佑
-搁
-哭
-递
-耷
-涡
-桃
-贻
-碣
-截
-瘦
-昭
-镌
-蔓
-氚
-甲
-猕
-蕴
-蓬
-散
-拾
-纛
-狼
-猷
-铎
-埋
-旖
-矾
-讳
-囊
-糜
-迈
-粟
-蚂
-紧
-鲳
-瘢
-栽
-稼
-羊
-锄
-斟
-睁
-桥
-瓮
-蹙
-祉
-醺
-鼻
-昱
-剃
-跳
-篱
-跷
-蒜
-翎
-宅
-晖
-嗑
-壑
-峻
-癫
-屏
-狠
-陋
-袜
-途
-憎
-祀
-莹
-滟
-佶
-溥
-臣
-约
-盛
-峰
-磁
-慵
-婪
-拦
-莅
-朕
-鹦
-粲
-裤
-哎
-疡
-嫖
-琵
-窟
-堪
-谛
-嘉
-儡
-鳝
-斩
-郾
-驸
-酊
-妄
-胜
-贺
-徙
-傅
-噌
-钢
-栅
-庇
-恋
-匝
-巯
-邈
-尸
-锚
-粗
-佟
-蛟
-薹
-纵
-蚊
-郅
-绢
-锐
-苗
-俞
-篆
-淆
-膀
-鲜
-煎
-诶
-秽
-寻
-涮
-刺
-怀
-噶
-巨
-褰
-魅
-灶
-灌
-桉
-藕
-谜
-舸
-薄
-搀
-恽
-借
-牯
-痉
-渥
-愿
-亓
-耘
-杠
-柩
-锔
-蚶
-钣
-珈
-喘
-蹒
-幽
-赐
-稗
-晤
-莱
-泔
-扯
-肯
-菪
-裆
-腩
-豉
-疆
-骜
-腐
-倭
-珏
-唔
-粮
-亡
-润
-慰
-伽
-橄
-玄
-誉
-醐
-胆
-龊
-粼
-塬
-陇
-彼
-削
-嗣
-绾
-芽
-妗
-垭
-瘴
-爽
-薏
-寨
-龈
-泠
-弹
-赢
-漪
-猫
-嘧
-涂
-恤
-圭
-茧
-烽
-屑
-痕
-巾
-赖
-荸
-凰
-腮
-畈
-亵
-蹲
-偃
-苇
-澜
-艮
-换
-骺
-烘
-苕
-梓
-颉
-肇
-哗
-悄
-氤
-涠
-葬
-屠
-鹭
-植
-竺
-佯
-诣
-鲇
-瘀
-鲅
-邦
-移
-滁
-冯
-耕
-癔
-戌
-茬
-沁
-巩
-悠
-湘
-洪
-痹
-锟
-循
-谋
-腕
-鳃
-钠
-捞
-焉
-迎
-碱
-伫
-急
-榷
-奈
-邝
-卯
-辄
-皲
-卟
-醛
-畹
-忧
-稳
-雄
-昼
-缩
-阈
-睑
-扌
-耗
-曦
-涅
-捏
-瞧
-邕
-淖
-漉
-铝
-耦
-禹
-湛
-喽
-莼
-琅
-诸
-苎
-纂
-硅
-始
-嗨
-傥
-燃
-臂
-赅
-嘈
-呆
-贵
-屹
-壮
-肋
-亍
-蚀
-卅
-豹
-腆
-邬
-迭
-浊
-}
-童
-螂
-捐
-圩
-勐
-触
-寞
-汊
-壤
-荫
-膺
-渌
-芳
-懿
-遴
-螈
-泰
-蓼
-蛤
-茜
-舅
-枫
-朔
-膝
-眙
-避
-梅
-判
-鹜
-璜
-牍
-缅
-垫
-藻
-黔
-侥
-惚
-懂
-踩
-腰
-腈
-札
-丞
-唾
-慈
-顿
-摹
-荻
-琬
-~
-斧
-沈
-滂
-胁
-胀
-幄
-莜
-Z
-匀
-鄄
-掌
-绰
-茎
-焚
-赋
-萱
-谑
-汁
-铒
-瞎
-夺
-蜗
-野
-娆
-冀
-弯
-篁
-懵
-灞
-隽
-芡
-脘
-俐
-辩
-芯
-掺
-喏
-膈
-蝈
-觐
-悚
-踹
-蔗
-熠
-鼠
-呵
-抓
-橼
-峨
-畜
-缔
-禾
-崭
-弃
-熊
-摒
-凸
-拗
-穹
-蒙
-抒
-祛
-劝
-闫
-扳
-阵
-醌
-踪
-喵
-侣
-搬
-仅
-荧
-赎
-蝾
-琦
-买
-婧
-瞄
-寓
-皎
-冻
-赝
-箩
-莫
-瞰
-郊
-笫
-姝
-筒
-枪
-遣
-煸
-袋
-舆
-痱
-涛
-母
-〇
-启
-践
-耙
-绲
-盘
-遂
-昊
-搞
-槿
-诬
-纰
-泓
-惨
-檬
-亻
-越
-C
-o
-憩
-熵
-祷
-钒
-暧
-塔
-阗
-胰
-咄
-娶
-魔
-琶
-钞
-邻
-扬
-杉
-殴
-咽
-弓
-〆
-髻
-】
-吭
-揽
-霆
-拄
-殖
-脆
-彻
-岩
-芝
-勃
-辣
-剌
-钝
-嘎
-甄
-佘
-皖
-伦
-授
-徕
-憔
-挪
-皇
-庞
-稔
-芜
-踏
-溴
-兖
-卒
-擢
-饥
-鳞
-煲
-‰
-账
-颗
-叻
-斯
-捧
-鳍
-琮
-讹
-蛙
-纽
-谭
-酸
-兔
-莒
-睇
-伟
-觑
-羲
-嗜
-宜
-褐
-旎
-辛
-卦
-诘
-筋
-鎏
-溪
-挛
-熔
-阜
-晰
-鳅
-丢
-奚
-灸
-呱
-献
-陉
-黛
-鸪
-甾
-萨
-疮
-拯
-洲
-疹
-辑
-叙
-恻
-谒
-允
-柔
-烂
-氏
-逅
-漆
-拎
-惋
-扈
-湟
-纭
-啕
-掬
-擞
-哥
-忽
-涤
-鸵
-靡
-郗
-瓷
-扁
-廊
-怨
-雏
-钮
-敦
-E
-懦
-憋
-汀
-拚
-啉
-腌
-岸
-f
-痼
-瞅
-尊
-咀
-眩
-飙
-忌
-仝
-迦
-熬
-毫
-胯
-篑
-茄
-腺
-凄
-舛
-碴
-锵
-诧
-羯
-後
-漏
-汤
-宓
-仞
-蚁
-壶
-谰
-皑
-铄
-棰
-罔
-辅
-晶
-苦
-牟
-闽
-\
-烃
-饮
-聿
-丙
-蛳
-朱
-煤
-涔
-鳖
-犁
-罐
-荼
-砒
-淦
-妤
-黏
-戎
-孑
-婕
-瑾
-戢
-钵
-枣
-捋
-砥
-衩
-狙
-桠
-稣
-阎
-肃
-梏
-诫
-孪
-昶
-婊
-衫
-嗔
-侃
-塞
-蜃
-樵
-峒
-貌
-屿
-欺
-缫
-阐
-栖
-诟
-珞
-荭
-吝
-萍
-嗽
-恂
-啻
-蜴
-磬
-峋
-俸
-豫
-谎
-徊
-镍
-韬
-魇
-晴
-U
-囟
-猜
-蛮
-坐
-囿
-伴
-亭
-肝
-佗
-蝠
-妃
-胞
-滩
-榴
-氖
-垩
-苋
-砣
-扪
-馏
-姓
-轩
-厉
-夥
-侈
-禀
-垒
-岑
-赏
-钛
-辐
-痔
-披
-纸
-碳
-“
-坞
-蠓
-挤
-荥
-沅
-悔
-铧
-帼
-蒌
-蝇
-a
-p
-y
-n
-g
-哀
-浆
-瑶
-凿
-桶
-馈
-皮
-奴
-苜
-佤
-伶
-晗
-铱
-炬
-优
-弊
-氢
-恃
-甫
-攥
-端
-锌
-灰
-稹
-炝
-曙
-邋
-亥
-眶
-碾
-拉
-萝
-绔
-捷
-浍
-腋
-姑
-菖
-凌
-涞
-麽
-锢
-桨
-潢
-绎
-镰
-殆
-锑
-渝
-铬
-困
-绽
-觎
-匈
-糙
-暑
-裹
-鸟
-盔
-肽
-迷
-綦
-『
-亳
-佝
-俘
-钴
-觇
-骥
-仆
-疝
-跪
-婶
-郯
-瀹
-唉
-脖
-踞
-针
-晾
-忒
-扼
-瞩
-叛
-椒
-疟
-嗡
-邗
-肆
-跆
-玫
-忡
-捣
-咧
-唆
-艄
-蘑
-潦
-笛
-阚
-沸
-泻
-掊
-菽
-贫
-斥
-髂
-孢
-镂
-赂
-麝
-鸾
-屡
-衬
-苷
-恪
-叠
-希
-粤
-爻
-喝
-茫
-惬
-郸
-绻
-庸
-撅
-碟
-宄
-妹
-膛
-叮
-饵
-崛
-嗲
-椅
-冤
-搅
-咕
-敛
-尹
-垦
-闷
-蝉
-霎
-勰
-败
-蓑
-泸
-肤
-鹌
-幌
-焦
-浠
-鞍
-刁
-舰
-乙
-竿
-裔
-。
-茵
-函
-伊
-兄
-丨
-娜
-匍
-謇
-莪
-宥
-似
-蝽
-翳
-酪
-翠
-粑
-薇
-祢
-骏
-赠
-叫
-Q
-噤
-噻
-竖
-芗
-莠
-潭
-俊
-羿
-耜
-O
-郫
-趁
-嗪
-囚
-蹶
-芒
-洁
-笋
-鹑
-敲
-硝
-啶
-堡
-渲
-揩
-』
-携
-宿
-遒
-颍
-扭
-棱
-割
-萜
-蔸
-葵
-琴
-捂
-饰
-衙
-耿
-掠
-募
-岂
-窖
-涟
-蔺
-瘤
-柞
-瞪
-怜
-匹
-距
-楔
-炜
-哆
-秦
-缎
-幼
-茁
-绪
-痨
-恨
-楸
-娅
-瓦
-桩
-雪
-嬴
-伏
-榔
-妥
-铿
-拌
-眠
-雍
-缇
-‘
-卓
-搓
-哌
-觞
-噩
-屈
-哧
-髓
-咦
-巅
-娑
-侑
-淫
-膳
-祝
-勾
-姊
-莴
-胄
-疃
-薛
-蜷
-胛
-巷
-芙
-芋
-熙
-闰
-勿
-窃
-狱
-剩
-钏
-幢
-陟
-铛
-慧
-靴
-耍
-k
-浙
-浇
-飨
-惟
-绗
-祜
-澈
-啼
-咪
-磷
-摞
-诅
-郦
-抹
-跃
-壬
-吕
-肖
-琏
-颤
-尴
-剡
-抠
-凋
-赚
-泊
-津
-宕
-殷
-倔
-氲
-漫
-邺
-涎
-怠
-$
-垮
-荬
-遵
-俏
-叹
-噢
-饽
-蜘
-孙
-筵
-疼
-鞭
-羧
-牦
-箭
-潴
-c
-眸
-祭
-髯
-啖
-坳
-愁
-芩
-驮
-倡
-巽
-穰
-沃
-胚
-怒
-凤
-槛
-剂
-趵
-嫁
-v
-邢
-灯
-鄢
-桐
-睽
-檗
-锯
-槟
-婷
-嵋
-圻
-诗
-蕈
-颠
-遭
-痢
-芸
-怯
-馥
-竭
-锗
-徜
-恭
-遍
-籁
-剑
-嘱
-苡
-龄
-僧
-桑
-潸
-弘
-澶
-楹
-悲
-讫
-愤
-腥
-悸
-谍
-椹
-呢
-桓
-葭
-攫
-阀
-翰
-躲
-敖
-柑
-郎
-笨
-橇
-呃
-魁
-燎
-脓
-葩
-磋
-垛
-玺
-狮
-沓
-砜
-蕊
-锺
-罹
-蕉
-翱
-虐
-闾
-巫
-旦
-茱
-嬷
-枯
-鹏
-贡
-芹
-汛
-矫
-绁
-拣
-禺
-佃
-讣
-舫
-惯
-乳
-趋
-疲
-挽
-岚
-虾
-衾
-蠹
-蹂
-飓
-氦
-铖
-孩
-稞
-瑜
-壅
-掀
-勘
-妓
-畅
-髋
-W
-庐
-牲
-蓿
-榕
-练
-垣
-唱
-邸
-菲
-昆
-婺
-穿
-绡
-麒
-蚱
-掂
-愚
-泷
-涪
-漳
-妩
-娉
-榄
-讷
-觅
-旧
-藤
-煮
-呛
-柳
-腓
-叭
-庵
-烷
-阡
-罂
-蜕
-擂
-猖
-咿
-媲
-脉
-【
-沏
-貅
-黠
-熏
-哲
-烁
-坦
-酵
-兜
-×
-潇
-撒
-剽
-珩
-圹
-乾
-摸
-樟
-帽
-嗒
-襄
-魂
-轿
-憬
-锡
-〕
-喃
-皆
-咖
-隅
-脸
-残
-泮
-袂
-鹂
-珊
-囤
-捆
-咤
-误
-徨
-闹
-淙
-芊
-淋
-怆
-囗
-拨
-梳
-渤
-R
-G
-绨
-蚓
-婀
-幡
-狩
-麾
-谢
-唢
-裸
-旌
-伉
-纶
-裂
-驳
-砼
-咛
-澄
-樨
-蹈
-宙
-澍
-倍
-貔
-操
-勇
-蟠
-摈
-砧
-虬
-够
-缁
-悦
-藿
-撸
-艹
-摁
-淹
-豇
-虎
-榭
-ˉ
-吱
-d
-°
-喧
-荀
-踱
-侮
-奋
-偕
-饷
-犍
-惮
-坑
-璎
-徘
-宛
-妆
-袈
-倩
-窦
-昂
-荏
-乖
-K
-怅
-撰
-鳙
-牙
-袁
-酞
-X
-痿
-琼
-闸
-雁
-趾
-荚
-虻
-涝
-《
-杏
-韭
-偈
-烤
-绫
-鞘
-卉
-症
-遢
-蓥
-诋
-杭
-荨
-匆
-竣
-簪
-辙
-敕
-虞
-丹
-缭
-咩
-黟
-m
-淤
-瑕
-咂
-铉
-硼
-茨
-嶂
-痒
-畸
-敬
-涿
-粪
-窘
-熟
-叔
-嫔
-盾
-忱
-裘
-憾
-梵
-赡
-珙
-咯
-娘
-庙
-溯
-胺
-葱
-痪
-摊
-荷
-卞
-乒
-髦
-寐
-铭
-坩
-胗
-枷
-爆
-溟
-嚼
-羚
-砬
-轨
-惊
-挠
-罄
-竽
-菏
-氧
-浅
-楣
-盼
-枢
-炸
-阆
-杯
-谏
-噬
-淇
-渺
-俪
-秆
-墓
-泪
-跻
-砌
-痰
-垡
-渡
-耽
-釜
-讶
-鳎
-煞
-呗
-韶
-舶
-绷
-鹳
-缜
-旷
-铊
-皱
-龌
-檀
-霖
-奄
-槐
-艳
-蝶
-旋
-哝
-赶
-骞
-蚧
-腊
-盈
-丁
-`
-蜚
-矸
-蝙
-睨
-嚓
-僻
-鬼
-醴
-夜
-彝
-磊
-笔
-拔
-栀
-糕
-厦
-邰
-纫
-逭
-纤
-眦
-膊
-馍
-躇
-烯
-蘼
-冬
-诤
-暄
-骶
-哑
-瘠
-」
-臊
-丕
-愈
-咱
-螺
-擅
-跋
-搏
-硪
-谄
-笠
-淡
-嘿
-骅
-谧
-鼎
-皋
-姚
-歼
-蠢
-驼
-耳
-胬
-挝
-涯
-狗
-蒽
-孓
-犷
-凉
-芦
-箴
-铤
-孤
-嘛
-坤
-V
-茴
-朦
-挞
-尖
-橙
-诞
-搴
-碇
-洵
-浚
-帚
-蜍
-漯
-柘
-嚎
-讽
-芭
-荤
-咻
-祠
-秉
-跖
-埃
-吓
-糯
-眷
-馒
-惹
-娼
-鲑
-嫩
-讴
-轮
-瞥
-靶
-褚
-乏
-缤
-宋
-帧
-删
-驱
-碎
-扑
-俩
-俄
-偏
-涣
-竹
-噱
-皙
-佰
-渚
-唧
-斡
-#
-镉
-刀
-崎
-筐
-佣
-夭
-贰
-肴
-峙
-哔
-艿
-匐
-牺
-镛
-缘
-仡
-嫡
-劣
-枸
-堀
-梨
-簿
-鸭
-蒸
-亦
-稽
-浴
-{
-衢
-束
-槲
-j
-阁
-揍
-疥
-棋
-潋
-聪
-窜
-乓
-睛
-插
-冉
-阪
-苍
-搽
-「
-蟾
-螟
-幸
-仇
-樽
-撂
-慢
-跤
-幔
-俚
-淅
-覃
-觊
-溶
-妖
-帛
-侨
-曰
-妾
-泗
-·
-:
-瀘
-風
-Ë
-(
-)
-∶
-紅
-紗
-瑭
-雲
-頭
-鶏
-財
-許
-•
-¥
-樂
-焗
-麗
-—
-;
-滙
-東
-榮
-繪
-興
-…
-門
-業
-π
-楊
-國
-顧
-é
-盤
-寳
-Λ
-龍
-鳳
-島
-誌
-緣
-結
-銭
-萬
-勝
-祎
-璟
-優
-歡
-臨
-時
-購
-=
-★
-藍
-昇
-鐵
-觀
-勅
-農
-聲
-畫
-兿
-術
-發
-劉
-記
-專
-耑
-園
-書
-壴
-種
-Ο
-●
-褀
-號
-銀
-匯
-敟
-锘
-葉
-橪
-廣
-進
-蒄
-鑽
-阝
-祙
-貢
-鍋
-豊
-夬
-喆
-團
-閣
-開
-燁
-賓
-館
-酡
-沔
-順
-+
-硚
-劵
-饸
-陽
-車
-湓
-復
-萊
-氣
-軒
-華
-堃
-迮
-纟
-戶
-馬
-學
-裡
-電
-嶽
-獨
-マ
-シ
-サ
-ジ
-燘
-袪
-環
-❤
-臺
-灣
-専
-賣
-孖
-聖
-攝
-線
-▪
-α
-傢
-俬
-夢
-達
-莊
-喬
-貝
-薩
-劍
-羅
-壓
-棛
-饦
-尃
-璈
-囍
-醫
-G
-I
-A
-#
-N
-鷄
-髙
-嬰
-啓
-約
-隹
-潔
-賴
-藝
-~
-寶
-籣
-麺
-
-嶺
-√
-義
-網
-峩
-長
-∧
-魚
-機
-構
-②
-鳯
-偉
-L
-B
-㙟
-畵
-鴿
-'
-詩
-溝
-嚞
-屌
-藔
-佧
-玥
-蘭
-織
-1
-3
-9
-0
-7
-點
-砭
-鴨
-鋪
-銘
-廳
-弍
-‧
-創
-湯
-坶
-℃
-卩
-骝
-&
-烜
-荘
-當
-潤
-扞
-係
-懷
-碶
-钅
-蚨
-讠
-☆
-叢
-爲
-埗
-涫
-塗
-→
-楽
-現
-鯨
-愛
-瑪
-鈺
-忄
-悶
-藥
-飾
-樓
-視
-孬
-ㆍ
-燚
-苪
-師
-①
-丼
-锽
-│
-韓
-標
-è
-兒
-閏
-匋
-張
-漢
-Ü
-髪
-會
-閑
-檔
-習
-裝
-の
-峯
-菘
-輝
-И
-雞
-釣
-億
-浐
-K
-O
-R
-8
-H
-E
-P
-T
-W
-D
-S
-C
-M
-F
-姌
-饹
-»
-晞
-廰
-ä
-嵯
-鷹
-負
-飲
-絲
-冚
-楗
-澤
-綫
-區
-❋
-←
-質
-靑
-揚
-③
-滬
-統
-産
-協
-﹑
-乸
-畐
-經
-運
-際
-洺
-岽
-為
-粵
-諾
-崋
-豐
-碁
-ɔ
-V
-2
-6
-齋
-誠
-訂
-´
-勑
-雙
-陳
-無
-í
-泩
-媄
-夌
-刂
-i
-c
-t
-o
-r
-a
-嘢
-耄
-燴
-暃
-壽
-媽
-靈
-抻
-體
-唻
-É
-冮
-甹
-鎮
-錦
-ʌ
-蜛
-蠄
-尓
-駕
-戀
-飬
-逹
-倫
-貴
-極
-Я
-Й
-寬
-磚
-嶪
-郎
-職
-|
-間
-n
-d
-剎
-伈
-課
-飛
-橋
-瘊
-№
-譜
-骓
-圗
-滘
-縣
-粿
-咅
-養
-濤
-彳
-®
-%
-Ⅱ
-啰
-㴪
-見
-矞
-薬
-糁
-邨
-鲮
-顔
-罱
-З
-選
-話
-贏
-氪
-俵
-競
-瑩
-繡
-枱
-β
-綉
-á
-獅
-爾
-™
-麵
-戋
-淩
-徳
-個
-劇
-場
-務
-簡
-寵
-h
-實
-膠
-轱
-圖
-築
-嘣
-樹
-㸃
-營
-耵
-孫
-饃
-鄺
-飯
-麯
-遠
-輸
-坫
-孃
-乚
-閃
-鏢
-㎡
-題
-廠
-關
-↑
-爺
-將
-軍
-連
-篦
-覌
-參
-箸
--
-窠
-棽
-寕
-夀
-爰
-歐
-呙
-閥
-頡
-熱
-雎
-垟
-裟
-凬
-勁
-帑
-馕
-夆
-疌
-枼
-馮
-貨
-蒤
-樸
-彧
-旸
-靜
-龢
-暢
-㐱
-鳥
-珺
-鏡
-灡
-爭
-堷
-廚
-Ó
-騰
-診
-┅
-蘇
-褔
-凱
-頂
-豕
-亞
-帥
-嘬
-⊥
-仺
-桖
-複
-饣
-絡
-穂
-顏
-棟
-納
-▏
-濟
-親
-設
-計
-攵
-埌
-烺
-ò
-頤
-燦
-蓮
-撻
-節
-講
-濱
-濃
-娽
-洳
-朿
-燈
-鈴
-護
-膚
-铔
-過
-補
-Z
-U
-5
-4
-坋
-闿
-䖝
-餘
-缐
-铞
-貿
-铪
-桼
-趙
-鍊
-[
-㐂
-垚
-菓
-揸
-捲
-鐘
-滏
-𣇉
-爍
-輪
-燜
-鴻
-鮮
-動
-鹞
-鷗
-丄
-慶
-鉌
-翥
-飮
-腸
-⇋
-漁
-覺
-來
-熘
-昴
-翏
-鲱
-圧
-鄉
-萭
-頔
-爐
-嫚
-г
-貭
-類
-聯
-幛
-輕
-訓
-鑒
-夋
-锨
-芃
-珣
-䝉
-扙
-嵐
-銷
-處
-ㄱ
-語
-誘
-苝
-歸
-儀
-燒
-楿
-內
-粢
-葒
-奧
-麥
-礻
-滿
-蠔
-穵
-瞭
-態
-鱬
-榞
-硂
-鄭
-黃
-煙
-祐
-奓
-逺
-*
-瑄
-獲
-聞
-薦
-讀
-這
-樣
-決
-問
-啟
-們
-執
-説
-轉
-單
-隨
-唘
-帶
-倉
-庫
-還
-贈
-尙
-皺
-■
-餅
-產
-○
-∈
-報
-狀
-楓
-賠
-琯
-嗮
-禮
-`
-傳
->
-≤
-嗞
-Φ
-≥
-換
-咭
-∣
-↓
-曬
-ε
-応
-寫
-″
-終
-様
-純
-費
-療
-聨
-凍
-壐
-郵
-ü
-黒
-∫
-製
-塊
-調
-軽
-確
-撃
-級
-馴
-Ⅲ
-涇
-繹
-數
-碼
-證
-狒
-処
-劑
-<
-晧
-賀
-衆
-]
-櫥
-兩
-陰
-絶
-對
-鯉
-憶
-◎
-p
-e
-Y
-蕒
-煖
-頓
-測
-試
-鼽
-僑
-碩
-妝
-帯
-≈
-鐡
-舖
-權
-喫
-倆
-ˋ
-該
-悅
-ā
-俫
-.
-f
-s
-b
-m
-k
-g
-u
-j
-貼
-淨
-濕
-針
-適
-備
-l
-/
-給
-謢
-強
-觸
-衛
-與
-⊙
-$
-緯
-變
-⑴
-⑵
-⑶
-㎏
-殺
-∩
-幚
-─
-價
-▲
-離
-ú
-ó
-飄
-烏
-関
-閟
-﹝
-﹞
-邏
-輯
-鍵
-驗
-訣
-導
-歷
-屆
-層
-▼
-儱
-錄
-熳
-ē
-艦
-吋
-錶
-辧
-飼
-顯
-④
-禦
-販
-気
-対
-枰
-閩
-紀
-幹
-瞓
-貊
-淚
-△
-眞
-墊
-Ω
-獻
-褲
-縫
-緑
-亜
-鉅
-餠
-{
-}
-◆
-蘆
-薈
-█
-◇
-溫
-彈
-晳
-粧
-犸
-穩
-訊
-崬
-凖
-熥
-П
-舊
-條
-紋
-圍
-Ⅳ
-筆
-尷
-難
-雜
-錯
-綁
-識
-頰
-鎖
-艶
-□
-殁
-殼
-⑧
-├
-▕
-鵬
-ǐ
-ō
-ǒ
-糝
-綱
-▎
-μ
-盜
-饅
-醬
-籤
-蓋
-釀
-鹽
-據
-à
-ɡ
-辦
-◥
-彐
-┌
-婦
-獸
-鲩
-伱
-ī
-蒟
-蒻
-齊
-袆
-腦
-寧
-凈
-妳
-煥
-詢
-偽
-謹
-啫
-鯽
-騷
-鱸
-損
-傷
-鎻
-髮
-買
-冏
-儥
-両
-﹢
-∞
-載
-喰
-z
-羙
-悵
-燙
-曉
-員
-組
-徹
-艷
-痠
-鋼
-鼙
-縮
-細
-嚒
-爯
-≠
-維
-"
-鱻
-壇
-厍
-帰
-浥
-犇
-薡
-軎
-²
-應
-醜
-刪
-緻
-鶴
-賜
-噁
-軌
-尨
-镔
-鷺
-槗
-彌
-葚
-濛
-請
-溇
-緹
-賢
-訪
-獴
-瑅
-資
-縤
-陣
-蕟
-栢
-韻
-祼
-恁
-伢
-謝
-劃
-涑
-總
-衖
-踺
-砋
-凉
-籃
-駿
-苼
-瘋
-昽
-紡
-驊
-腎
-﹗
-響
-杋
-剛
-嚴
-禪
-歓
-槍
-傘
-檸
-檫
-炣
-勢
-鏜
-鎢
-銑
-尐
-減
-奪
-惡
-θ
-僮
-婭
-臘
-ū
-ì
-殻
-鉄
-∑
-蛲
-焼
-緖
-續
-紹
-懮
\ No newline at end of file
diff --git a/godo/deps/windows.go b/godo/deps/windows.go
deleted file mode 100644
index 4bec41b..0000000
--- a/godo/deps/windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-//go:build windows
-
-/*
- * GodoAI - A software focused on localizing AI applications
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package deps
-
-import (
- _ "embed"
-)
-
-//go:embed windows.zip
-var embeddedZip []byte
diff --git a/godo/deps/windows/goconv/pdf/pdfimages.exe b/godo/deps/windows/goconv/pdf/pdfimages.exe
deleted file mode 100644
index 08a87cd..0000000
Binary files a/godo/deps/windows/goconv/pdf/pdfimages.exe and /dev/null differ
diff --git a/godo/deps/windows/goconv/pdf/pdftohtml.exe b/godo/deps/windows/goconv/pdf/pdftohtml.exe
deleted file mode 100644
index ad2f55b..0000000
Binary files a/godo/deps/windows/goconv/pdf/pdftohtml.exe and /dev/null differ
diff --git a/godo/deps/windows/goconv/pdf/pdftopng.exe b/godo/deps/windows/goconv/pdf/pdftopng.exe
deleted file mode 100644
index dbe2dd0..0000000
Binary files a/godo/deps/windows/goconv/pdf/pdftopng.exe and /dev/null differ
diff --git a/godo/deps/windows/goconv/rapid/RapidOcrOnnx.exe b/godo/deps/windows/goconv/rapid/RapidOcrOnnx.exe
deleted file mode 100644
index bfda6c7..0000000
Binary files a/godo/deps/windows/goconv/rapid/RapidOcrOnnx.exe and /dev/null differ
diff --git a/godo/deps/windows/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx b/godo/deps/windows/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx
deleted file mode 100644
index be54729..0000000
Binary files a/godo/deps/windows/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx and /dev/null differ
diff --git a/godo/deps/windows/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx b/godo/deps/windows/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx
deleted file mode 100644
index 9522c96..0000000
Binary files a/godo/deps/windows/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx and /dev/null differ
diff --git a/godo/deps/windows/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx b/godo/deps/windows/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx
deleted file mode 100644
index 6b9d102..0000000
Binary files a/godo/deps/windows/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx and /dev/null differ
diff --git a/godo/deps/windows/goconv/rapid/models/ppocr_keys_v1.txt b/godo/deps/windows/goconv/rapid/models/ppocr_keys_v1.txt
deleted file mode 100644
index 84b885d..0000000
--- a/godo/deps/windows/goconv/rapid/models/ppocr_keys_v1.txt
+++ /dev/null
@@ -1,6623 +0,0 @@
-'
-疗
-绚
-诚
-娇
-溜
-题
-贿
-者
-廖
-更
-纳
-加
-奉
-公
-一
-就
-汴
-计
-与
-路
-房
-原
-妇
-2
-0
-8
--
-7
-其
->
-:
-]
-,
-,
-骑
-刈
-全
-消
-昏
-傈
-安
-久
-钟
-嗅
-不
-影
-处
-驽
-蜿
-资
-关
-椤
-地
-瘸
-专
-问
-忖
-票
-嫉
-炎
-韵
-要
-月
-田
-节
-陂
-鄙
-捌
-备
-拳
-伺
-眼
-网
-盎
-大
-傍
-心
-东
-愉
-汇
-蹿
-科
-每
-业
-里
-航
-晏
-字
-平
-录
-先
-1
-3
-彤
-鲶
-产
-稍
-督
-腴
-有
-象
-岳
-注
-绍
-在
-泺
-文
-定
-核
-名
-水
-过
-理
-让
-偷
-率
-等
-这
-发
-”
-为
-含
-肥
-酉
-相
-鄱
-七
-编
-猥
-锛
-日
-镀
-蒂
-掰
-倒
-辆
-栾
-栗
-综
-涩
-州
-雌
-滑
-馀
-了
-机
-块
-司
-宰
-甙
-兴
-矽
-抚
-保
-用
-沧
-秩
-如
-收
-息
-滥
-页
-疑
-埠
-!
-!
-姥
-异
-橹
-钇
-向
-下
-跄
-的
-椴
-沫
-国
-绥
-獠
-报
-开
-民
-蜇
-何
-分
-凇
-长
-讥
-藏
-掏
-施
-羽
-中
-讲
-派
-嘟
-人
-提
-浼
-间
-世
-而
-古
-多
-倪
-唇
-饯
-控
-庚
-首
-赛
-蜓
-味
-断
-制
-觉
-技
-替
-艰
-溢
-潮
-夕
-钺
-外
-摘
-枋
-动
-双
-单
-啮
-户
-枇
-确
-锦
-曜
-杜
-或
-能
-效
-霜
-盒
-然
-侗
-电
-晁
-放
-步
-鹃
-新
-杖
-蜂
-吒
-濂
-瞬
-评
-总
-隍
-对
-独
-合
-也
-是
-府
-青
-天
-诲
-墙
-组
-滴
-级
-邀
-帘
-示
-已
-时
-骸
-仄
-泅
-和
-遨
-店
-雇
-疫
-持
-巍
-踮
-境
-只
-亨
-目
-鉴
-崤
-闲
-体
-泄
-杂
-作
-般
-轰
-化
-解
-迂
-诿
-蛭
-璀
-腾
-告
-版
-服
-省
-师
-小
-规
-程
-线
-海
-办
-引
-二
-桧
-牌
-砺
-洄
-裴
-修
-图
-痫
-胡
-许
-犊
-事
-郛
-基
-柴
-呼
-食
-研
-奶
-律
-蛋
-因
-葆
-察
-戏
-褒
-戒
-再
-李
-骁
-工
-貂
-油
-鹅
-章
-啄
-休
-场
-给
-睡
-纷
-豆
-器
-捎
-说
-敏
-学
-会
-浒
-设
-诊
-格
-廓
-查
-来
-霓
-室
-溆
-¢
-诡
-寥
-焕
-舜
-柒
-狐
-回
-戟
-砾
-厄
-实
-翩
-尿
-五
-入
-径
-惭
-喹
-股
-宇
-篝
-|
-;
-美
-期
-云
-九
-祺
-扮
-靠
-锝
-槌
-系
-企
-酰
-阊
-暂
-蚕
-忻
-豁
-本
-羹
-执
-条
-钦
-H
-獒
-限
-进
-季
-楦
-于
-芘
-玖
-铋
-茯
-未
-答
-粘
-括
-样
-精
-欠
-矢
-甥
-帷
-嵩
-扣
-令
-仔
-风
-皈
-行
-支
-部
-蓉
-刮
-站
-蜡
-救
-钊
-汗
-松
-嫌
-成
-可
-.
-鹤
-院
-从
-交
-政
-怕
-活
-调
-球
-局
-验
-髌
-第
-韫
-谗
-串
-到
-圆
-年
-米
-/
-*
-友
-忿
-检
-区
-看
-自
-敢
-刃
-个
-兹
-弄
-流
-留
-同
-没
-齿
-星
-聆
-轼
-湖
-什
-三
-建
-蛔
-儿
-椋
-汕
-震
-颧
-鲤
-跟
-力
-情
-璺
-铨
-陪
-务
-指
-族
-训
-滦
-鄣
-濮
-扒
-商
-箱
-十
-召
-慷
-辗
-所
-莞
-管
-护
-臭
-横
-硒
-嗓
-接
-侦
-六
-露
-党
-馋
-驾
-剖
-高
-侬
-妪
-幂
-猗
-绺
-骐
-央
-酐
-孝
-筝
-课
-徇
-缰
-门
-男
-西
-项
-句
-谙
-瞒
-秃
-篇
-教
-碲
-罚
-声
-呐
-景
-前
-富
-嘴
-鳌
-稀
-免
-朋
-啬
-睐
-去
-赈
-鱼
-住
-肩
-愕
-速
-旁
-波
-厅
-健
-茼
-厥
-鲟
-谅
-投
-攸
-炔
-数
-方
-击
-呋
-谈
-绩
-别
-愫
-僚
-躬
-鹧
-胪
-炳
-招
-喇
-膨
-泵
-蹦
-毛
-结
-5
-4
-谱
-识
-陕
-粽
-婚
-拟
-构
-且
-搜
-任
-潘
-比
-郢
-妨
-醪
-陀
-桔
-碘
-扎
-选
-哈
-骷
-楷
-亿
-明
-缆
-脯
-监
-睫
-逻
-婵
-共
-赴
-淝
-凡
-惦
-及
-达
-揖
-谩
-澹
-减
-焰
-蛹
-番
-祁
-柏
-员
-禄
-怡
-峤
-龙
-白
-叽
-生
-闯
-起
-细
-装
-谕
-竟
-聚
-钙
-上
-导
-渊
-按
-艾
-辘
-挡
-耒
-盹
-饪
-臀
-记
-邮
-蕙
-受
-各
-医
-搂
-普
-滇
-朗
-茸
-带
-翻
-酚
-(
-光
-堤
-墟
-蔷
-万
-幻
-〓
-瑙
-辈
-昧
-盏
-亘
-蛀
-吉
-铰
-请
-子
-假
-闻
-税
-井
-诩
-哨
-嫂
-好
-面
-琐
-校
-馊
-鬣
-缂
-营
-访
-炖
-占
-农
-缀
-否
-经
-钚
-棵
-趟
-张
-亟
-吏
-茶
-谨
-捻
-论
-迸
-堂
-玉
-信
-吧
-瞠
-乡
-姬
-寺
-咬
-溏
-苄
-皿
-意
-赉
-宝
-尔
-钰
-艺
-特
-唳
-踉
-都
-荣
-倚
-登
-荐
-丧
-奇
-涵
-批
-炭
-近
-符
-傩
-感
-道
-着
-菊
-虹
-仲
-众
-懈
-濯
-颞
-眺
-南
-释
-北
-缝
-标
-既
-茗
-整
-撼
-迤
-贲
-挎
-耱
-拒
-某
-妍
-卫
-哇
-英
-矶
-藩
-治
-他
-元
-领
-膜
-遮
-穗
-蛾
-飞
-荒
-棺
-劫
-么
-市
-火
-温
-拈
-棚
-洼
-转
-果
-奕
-卸
-迪
-伸
-泳
-斗
-邡
-侄
-涨
-屯
-萋
-胭
-氡
-崮
-枞
-惧
-冒
-彩
-斜
-手
-豚
-随
-旭
-淑
-妞
-形
-菌
-吲
-沱
-争
-驯
-歹
-挟
-兆
-柱
-传
-至
-包
-内
-响
-临
-红
-功
-弩
-衡
-寂
-禁
-老
-棍
-耆
-渍
-织
-害
-氵
-渑
-布
-载
-靥
-嗬
-虽
-苹
-咨
-娄
-库
-雉
-榜
-帜
-嘲
-套
-瑚
-亲
-簸
-欧
-边
-6
-腿
-旮
-抛
-吹
-瞳
-得
-镓
-梗
-厨
-继
-漾
-愣
-憨
-士
-策
-窑
-抑
-躯
-襟
-脏
-参
-贸
-言
-干
-绸
-鳄
-穷
-藜
-音
-折
-详
-)
-举
-悍
-甸
-癌
-黎
-谴
-死
-罩
-迁
-寒
-驷
-袖
-媒
-蒋
-掘
-模
-纠
-恣
-观
-祖
-蛆
-碍
-位
-稿
-主
-澧
-跌
-筏
-京
-锏
-帝
-贴
-证
-糠
-才
-黄
-鲸
-略
-炯
-饱
-四
-出
-园
-犀
-牧
-容
-汉
-杆
-浈
-汰
-瑷
-造
-虫
-瘩
-怪
-驴
-济
-应
-花
-沣
-谔
-夙
-旅
-价
-矿
-以
-考
-s
-u
-呦
-晒
-巡
-茅
-准
-肟
-瓴
-詹
-仟
-褂
-译
-桌
-混
-宁
-怦
-郑
-抿
-些
-余
-鄂
-饴
-攒
-珑
-群
-阖
-岔
-琨
-藓
-预
-环
-洮
-岌
-宀
-杲
-瀵
-最
-常
-囡
-周
-踊
-女
-鼓
-袭
-喉
-简
-范
-薯
-遐
-疏
-粱
-黜
-禧
-法
-箔
-斤
-遥
-汝
-奥
-直
-贞
-撑
-置
-绱
-集
-她
-馅
-逗
-钧
-橱
-魉
-[
-恙
-躁
-唤
-9
-旺
-膘
-待
-脾
-惫
-购
-吗
-依
-盲
-度
-瘿
-蠖
-俾
-之
-镗
-拇
-鲵
-厝
-簧
-续
-款
-展
-啃
-表
-剔
-品
-钻
-腭
-损
-清
-锶
-统
-涌
-寸
-滨
-贪
-链
-吠
-冈
-伎
-迥
-咏
-吁
-览
-防
-迅
-失
-汾
-阔
-逵
-绀
-蔑
-列
-川
-凭
-努
-熨
-揪
-利
-俱
-绉
-抢
-鸨
-我
-即
-责
-膦
-易
-毓
-鹊
-刹
-玷
-岿
-空
-嘞
-绊
-排
-术
-估
-锷
-违
-们
-苟
-铜
-播
-肘
-件
-烫
-审
-鲂
-广
-像
-铌
-惰
-铟
-巳
-胍
-鲍
-康
-憧
-色
-恢
-想
-拷
-尤
-疳
-知
-S
-Y
-F
-D
-A
-峄
-裕
-帮
-握
-搔
-氐
-氘
-难
-墒
-沮
-雨
-叁
-缥
-悴
-藐
-湫
-娟
-苑
-稠
-颛
-簇
-后
-阕
-闭
-蕤
-缚
-怎
-佞
-码
-嘤
-蔡
-痊
-舱
-螯
-帕
-赫
-昵
-升
-烬
-岫
-、
-疵
-蜻
-髁
-蕨
-隶
-烛
-械
-丑
-盂
-梁
-强
-鲛
-由
-拘
-揉
-劭
-龟
-撤
-钩
-呕
-孛
-费
-妻
-漂
-求
-阑
-崖
-秤
-甘
-通
-深
-补
-赃
-坎
-床
-啪
-承
-吼
-量
-暇
-钼
-烨
-阂
-擎
-脱
-逮
-称
-P
-神
-属
-矗
-华
-届
-狍
-葑
-汹
-育
-患
-窒
-蛰
-佼
-静
-槎
-运
-鳗
-庆
-逝
-曼
-疱
-克
-代
-官
-此
-麸
-耧
-蚌
-晟
-例
-础
-榛
-副
-测
-唰
-缢
-迹
-灬
-霁
-身
-岁
-赭
-扛
-又
-菡
-乜
-雾
-板
-读
-陷
-徉
-贯
-郁
-虑
-变
-钓
-菜
-圾
-现
-琢
-式
-乐
-维
-渔
-浜
-左
-吾
-脑
-钡
-警
-T
-啵
-拴
-偌
-漱
-湿
-硕
-止
-骼
-魄
-积
-燥
-联
-踢
-玛
-则
-窿
-见
-振
-畿
-送
-班
-钽
-您
-赵
-刨
-印
-讨
-踝
-籍
-谡
-舌
-崧
-汽
-蔽
-沪
-酥
-绒
-怖
-财
-帖
-肱
-私
-莎
-勋
-羔
-霸
-励
-哼
-帐
-将
-帅
-渠
-纪
-婴
-娩
-岭
-厘
-滕
-吻
-伤
-坝
-冠
-戊
-隆
-瘁
-介
-涧
-物
-黍
-并
-姗
-奢
-蹑
-掣
-垸
-锴
-命
-箍
-捉
-病
-辖
-琰
-眭
-迩
-艘
-绌
-繁
-寅
-若
-毋
-思
-诉
-类
-诈
-燮
-轲
-酮
-狂
-重
-反
-职
-筱
-县
-委
-磕
-绣
-奖
-晋
-濉
-志
-徽
-肠
-呈
-獐
-坻
-口
-片
-碰
-几
-村
-柿
-劳
-料
-获
-亩
-惕
-晕
-厌
-号
-罢
-池
-正
-鏖
-煨
-家
-棕
-复
-尝
-懋
-蜥
-锅
-岛
-扰
-队
-坠
-瘾
-钬
-@
-卧
-疣
-镇
-譬
-冰
-彷
-频
-黯
-据
-垄
-采
-八
-缪
-瘫
-型
-熹
-砰
-楠
-襁
-箐
-但
-嘶
-绳
-啤
-拍
-盥
-穆
-傲
-洗
-盯
-塘
-怔
-筛
-丿
-台
-恒
-喂
-葛
-永
-¥
-烟
-酒
-桦
-书
-砂
-蚝
-缉
-态
-瀚
-袄
-圳
-轻
-蛛
-超
-榧
-遛
-姒
-奘
-铮
-右
-荽
-望
-偻
-卡
-丶
-氰
-附
-做
-革
-索
-戚
-坨
-桷
-唁
-垅
-榻
-岐
-偎
-坛
-莨
-山
-殊
-微
-骇
-陈
-爨
-推
-嗝
-驹
-澡
-藁
-呤
-卤
-嘻
-糅
-逛
-侵
-郓
-酌
-德
-摇
-※
-鬃
-被
-慨
-殡
-羸
-昌
-泡
-戛
-鞋
-河
-宪
-沿
-玲
-鲨
-翅
-哽
-源
-铅
-语
-照
-邯
-址
-荃
-佬
-顺
-鸳
-町
-霭
-睾
-瓢
-夸
-椁
-晓
-酿
-痈
-咔
-侏
-券
-噎
-湍
-签
-嚷
-离
-午
-尚
-社
-锤
-背
-孟
-使
-浪
-缦
-潍
-鞅
-军
-姹
-驶
-笑
-鳟
-鲁
-》
-孽
-钜
-绿
-洱
-礴
-焯
-椰
-颖
-囔
-乌
-孔
-巴
-互
-性
-椽
-哞
-聘
-昨
-早
-暮
-胶
-炀
-隧
-低
-彗
-昝
-铁
-呓
-氽
-藉
-喔
-癖
-瑗
-姨
-权
-胱
-韦
-堑
-蜜
-酋
-楝
-砝
-毁
-靓
-歙
-锲
-究
-屋
-喳
-骨
-辨
-碑
-武
-鸠
-宫
-辜
-烊
-适
-坡
-殃
-培
-佩
-供
-走
-蜈
-迟
-翼
-况
-姣
-凛
-浔
-吃
-飘
-债
-犟
-金
-促
-苛
-崇
-坂
-莳
-畔
-绂
-兵
-蠕
-斋
-根
-砍
-亢
-欢
-恬
-崔
-剁
-餐
-榫
-快
-扶
-‖
-濒
-缠
-鳜
-当
-彭
-驭
-浦
-篮
-昀
-锆
-秸
-钳
-弋
-娣
-瞑
-夷
-龛
-苫
-拱
-致
-%
-嵊
-障
-隐
-弑
-初
-娓
-抉
-汩
-累
-蓖
-"
-唬
-助
-苓
-昙
-押
-毙
-破
-城
-郧
-逢
-嚏
-獭
-瞻
-溱
-婿
-赊
-跨
-恼
-璧
-萃
-姻
-貉
-灵
-炉
-密
-氛
-陶
-砸
-谬
-衔
-点
-琛
-沛
-枳
-层
-岱
-诺
-脍
-榈
-埂
-征
-冷
-裁
-打
-蹴
-素
-瘘
-逞
-蛐
-聊
-激
-腱
-萘
-踵
-飒
-蓟
-吆
-取
-咙
-簋
-涓
-矩
-曝
-挺
-揣
-座
-你
-史
-舵
-焱
-尘
-苏
-笈
-脚
-溉
-榨
-诵
-樊
-邓
-焊
-义
-庶
-儋
-蟋
-蒲
-赦
-呷
-杞
-诠
-豪
-还
-试
-颓
-茉
-太
-除
-紫
-逃
-痴
-草
-充
-鳕
-珉
-祗
-墨
-渭
-烩
-蘸
-慕
-璇
-镶
-穴
-嵘
-恶
-骂
-险
-绋
-幕
-碉
-肺
-戳
-刘
-潞
-秣
-纾
-潜
-銮
-洛
-须
-罘
-销
-瘪
-汞
-兮
-屉
-r
-林
-厕
-质
-探
-划
-狸
-殚
-善
-煊
-烹
-〒
-锈
-逯
-宸
-辍
-泱
-柚
-袍
-远
-蹋
-嶙
-绝
-峥
-娥
-缍
-雀
-徵
-认
-镱
-谷
-=
-贩
-勉
-撩
-鄯
-斐
-洋
-非
-祚
-泾
-诒
-饿
-撬
-威
-晷
-搭
-芍
-锥
-笺
-蓦
-候
-琊
-档
-礁
-沼
-卵
-荠
-忑
-朝
-凹
-瑞
-头
-仪
-弧
-孵
-畏
-铆
-突
-衲
-车
-浩
-气
-茂
-悖
-厢
-枕
-酝
-戴
-湾
-邹
-飚
-攘
-锂
-写
-宵
-翁
-岷
-无
-喜
-丈
-挑
-嗟
-绛
-殉
-议
-槽
-具
-醇
-淞
-笃
-郴
-阅
-饼
-底
-壕
-砚
-弈
-询
-缕
-庹
-翟
-零
-筷
-暨
-舟
-闺
-甯
-撞
-麂
-茌
-蔼
-很
-珲
-捕
-棠
-角
-阉
-媛
-娲
-诽
-剿
-尉
-爵
-睬
-韩
-诰
-匣
-危
-糍
-镯
-立
-浏
-阳
-少
-盆
-舔
-擘
-匪
-申
-尬
-铣
-旯
-抖
-赘
-瓯
-居
-ˇ
-哮
-游
-锭
-茏
-歌
-坏
-甚
-秒
-舞
-沙
-仗
-劲
-潺
-阿
-燧
-郭
-嗖
-霏
-忠
-材
-奂
-耐
-跺
-砀
-输
-岖
-媳
-氟
-极
-摆
-灿
-今
-扔
-腻
-枝
-奎
-药
-熄
-吨
-话
-q
-额
-慑
-嘌
-协
-喀
-壳
-埭
-视
-著
-於
-愧
-陲
-翌
-峁
-颅
-佛
-腹
-聋
-侯
-咎
-叟
-秀
-颇
-存
-较
-罪
-哄
-岗
-扫
-栏
-钾
-羌
-己
-璨
-枭
-霉
-煌
-涸
-衿
-键
-镝
-益
-岢
-奏
-连
-夯
-睿
-冥
-均
-糖
-狞
-蹊
-稻
-爸
-刿
-胥
-煜
-丽
-肿
-璃
-掸
-跚
-灾
-垂
-樾
-濑
-乎
-莲
-窄
-犹
-撮
-战
-馄
-软
-络
-显
-鸢
-胸
-宾
-妲
-恕
-埔
-蝌
-份
-遇
-巧
-瞟
-粒
-恰
-剥
-桡
-博
-讯
-凯
-堇
-阶
-滤
-卖
-斌
-骚
-彬
-兑
-磺
-樱
-舷
-两
-娱
-福
-仃
-差
-找
-桁
-÷
-净
-把
-阴
-污
-戬
-雷
-碓
-蕲
-楚
-罡
-焖
-抽
-妫
-咒
-仑
-闱
-尽
-邑
-菁
-爱
-贷
-沥
-鞑
-牡
-嗉
-崴
-骤
-塌
-嗦
-订
-拮
-滓
-捡
-锻
-次
-坪
-杩
-臃
-箬
-融
-珂
-鹗
-宗
-枚
-降
-鸬
-妯
-阄
-堰
-盐
-毅
-必
-杨
-崃
-俺
-甬
-状
-莘
-货
-耸
-菱
-腼
-铸
-唏
-痤
-孚
-澳
-懒
-溅
-翘
-疙
-杷
-淼
-缙
-骰
-喊
-悉
-砻
-坷
-艇
-赁
-界
-谤
-纣
-宴
-晃
-茹
-归
-饭
-梢
-铡
-街
-抄
-肼
-鬟
-苯
-颂
-撷
-戈
-炒
-咆
-茭
-瘙
-负
-仰
-客
-琉
-铢
-封
-卑
-珥
-椿
-镧
-窨
-鬲
-寿
-御
-袤
-铃
-萎
-砖
-餮
-脒
-裳
-肪
-孕
-嫣
-馗
-嵇
-恳
-氯
-江
-石
-褶
-冢
-祸
-阻
-狈
-羞
-银
-靳
-透
-咳
-叼
-敷
-芷
-啥
-它
-瓤
-兰
-痘
-懊
-逑
-肌
-往
-捺
-坊
-甩
-呻
-〃
-沦
-忘
-膻
-祟
-菅
-剧
-崆
-智
-坯
-臧
-霍
-墅
-攻
-眯
-倘
-拢
-骠
-铐
-庭
-岙
-瓠
-′
-缺
-泥
-迢
-捶
-?
-?
-郏
-喙
-掷
-沌
-纯
-秘
-种
-听
-绘
-固
-螨
-团
-香
-盗
-妒
-埚
-蓝
-拖
-旱
-荞
-铀
-血
-遏
-汲
-辰
-叩
-拽
-幅
-硬
-惶
-桀
-漠
-措
-泼
-唑
-齐
-肾
-念
-酱
-虚
-屁
-耶
-旗
-砦
-闵
-婉
-馆
-拭
-绅
-韧
-忏
-窝
-醋
-葺
-顾
-辞
-倜
-堆
-辋
-逆
-玟
-贱
-疾
-董
-惘
-倌
-锕
-淘
-嘀
-莽
-俭
-笏
-绑
-鲷
-杈
-择
-蟀
-粥
-嗯
-驰
-逾
-案
-谪
-褓
-胫
-哩
-昕
-颚
-鲢
-绠
-躺
-鹄
-崂
-儒
-俨
-丝
-尕
-泌
-啊
-萸
-彰
-幺
-吟
-骄
-苣
-弦
-脊
-瑰
-〈
-诛
-镁
-析
-闪
-剪
-侧
-哟
-框
-螃
-守
-嬗
-燕
-狭
-铈
-缮
-概
-迳
-痧
-鲲
-俯
-售
-笼
-痣
-扉
-挖
-满
-咋
-援
-邱
-扇
-歪
-便
-玑
-绦
-峡
-蛇
-叨
-〖
-泽
-胃
-斓
-喋
-怂
-坟
-猪
-该
-蚬
-炕
-弥
-赞
-棣
-晔
-娠
-挲
-狡
-创
-疖
-铕
-镭
-稷
-挫
-弭
-啾
-翔
-粉
-履
-苘
-哦
-楼
-秕
-铂
-土
-锣
-瘟
-挣
-栉
-习
-享
-桢
-袅
-磨
-桂
-谦
-延
-坚
-蔚
-噗
-署
-谟
-猬
-钎
-恐
-嬉
-雒
-倦
-衅
-亏
-璩
-睹
-刻
-殿
-王
-算
-雕
-麻
-丘
-柯
-骆
-丸
-塍
-谚
-添
-鲈
-垓
-桎
-蚯
-芥
-予
-飕
-镦
-谌
-窗
-醚
-菀
-亮
-搪
-莺
-蒿
-羁
-足
-J
-真
-轶
-悬
-衷
-靛
-翊
-掩
-哒
-炅
-掐
-冼
-妮
-l
-谐
-稚
-荆
-擒
-犯
-陵
-虏
-浓
-崽
-刍
-陌
-傻
-孜
-千
-靖
-演
-矜
-钕
-煽
-杰
-酗
-渗
-伞
-栋
-俗
-泫
-戍
-罕
-沾
-疽
-灏
-煦
-芬
-磴
-叱
-阱
-榉
-湃
-蜀
-叉
-醒
-彪
-租
-郡
-篷
-屎
-良
-垢
-隗
-弱
-陨
-峪
-砷
-掴
-颁
-胎
-雯
-绵
-贬
-沐
-撵
-隘
-篙
-暖
-曹
-陡
-栓
-填
-臼
-彦
-瓶
-琪
-潼
-哪
-鸡
-摩
-啦
-俟
-锋
-域
-耻
-蔫
-疯
-纹
-撇
-毒
-绶
-痛
-酯
-忍
-爪
-赳
-歆
-嘹
-辕
-烈
-册
-朴
-钱
-吮
-毯
-癜
-娃
-谀
-邵
-厮
-炽
-璞
-邃
-丐
-追
-词
-瓒
-忆
-轧
-芫
-谯
-喷
-弟
-半
-冕
-裙
-掖
-墉
-绮
-寝
-苔
-势
-顷
-褥
-切
-衮
-君
-佳
-嫒
-蚩
-霞
-佚
-洙
-逊
-镖
-暹
-唛
-&
-殒
-顶
-碗
-獗
-轭
-铺
-蛊
-废
-恹
-汨
-崩
-珍
-那
-杵
-曲
-纺
-夏
-薰
-傀
-闳
-淬
-姘
-舀
-拧
-卷
-楂
-恍
-讪
-厩
-寮
-篪
-赓
-乘
-灭
-盅
-鞣
-沟
-慎
-挂
-饺
-鼾
-杳
-树
-缨
-丛
-絮
-娌
-臻
-嗳
-篡
-侩
-述
-衰
-矛
-圈
-蚜
-匕
-筹
-匿
-濞
-晨
-叶
-骋
-郝
-挚
-蚴
-滞
-增
-侍
-描
-瓣
-吖
-嫦
-蟒
-匾
-圣
-赌
-毡
-癞
-恺
-百
-曳
-需
-篓
-肮
-庖
-帏
-卿
-驿
-遗
-蹬
-鬓
-骡
-歉
-芎
-胳
-屐
-禽
-烦
-晌
-寄
-媾
-狄
-翡
-苒
-船
-廉
-终
-痞
-殇
-々
-畦
-饶
-改
-拆
-悻
-萄
-£
-瓿
-乃
-訾
-桅
-匮
-溧
-拥
-纱
-铍
-骗
-蕃
-龋
-缬
-父
-佐
-疚
-栎
-醍
-掳
-蓄
-x
-惆
-颜
-鲆
-榆
-〔
-猎
-敌
-暴
-谥
-鲫
-贾
-罗
-玻
-缄
-扦
-芪
-癣
-落
-徒
-臾
-恿
-猩
-托
-邴
-肄
-牵
-春
-陛
-耀
-刊
-拓
-蓓
-邳
-堕
-寇
-枉
-淌
-啡
-湄
-兽
-酷
-萼
-碚
-濠
-萤
-夹
-旬
-戮
-梭
-琥
-椭
-昔
-勺
-蜊
-绐
-晚
-孺
-僵
-宣
-摄
-冽
-旨
-萌
-忙
-蚤
-眉
-噼
-蟑
-付
-契
-瓜
-悼
-颡
-壁
-曾
-窕
-颢
-澎
-仿
-俑
-浑
-嵌
-浣
-乍
-碌
-褪
-乱
-蔟
-隙
-玩
-剐
-葫
-箫
-纲
-围
-伐
-决
-伙
-漩
-瑟
-刑
-肓
-镳
-缓
-蹭
-氨
-皓
-典
-畲
-坍
-铑
-檐
-塑
-洞
-倬
-储
-胴
-淳
-戾
-吐
-灼
-惺
-妙
-毕
-珐
-缈
-虱
-盖
-羰
-鸿
-磅
-谓
-髅
-娴
-苴
-唷
-蚣
-霹
-抨
-贤
-唠
-犬
-誓
-逍
-庠
-逼
-麓
-籼
-釉
-呜
-碧
-秧
-氩
-摔
-霄
-穸
-纨
-辟
-妈
-映
-完
-牛
-缴
-嗷
-炊
-恩
-荔
-茆
-掉
-紊
-慌
-莓
-羟
-阙
-萁
-磐
-另
-蕹
-辱
-鳐
-湮
-吡
-吩
-唐
-睦
-垠
-舒
-圜
-冗
-瞿
-溺
-芾
-囱
-匠
-僳
-汐
-菩
-饬
-漓
-黑
-霰
-浸
-濡
-窥
-毂
-蒡
-兢
-驻
-鹉
-芮
-诙
-迫
-雳
-厂
-忐
-臆
-猴
-鸣
-蚪
-栈
-箕
-羡
-渐
-莆
-捍
-眈
-哓
-趴
-蹼
-埕
-嚣
-骛
-宏
-淄
-斑
-噜
-严
-瑛
-垃
-椎
-诱
-压
-庾
-绞
-焘
-廿
-抡
-迄
-棘
-夫
-纬
-锹
-眨
-瞌
-侠
-脐
-竞
-瀑
-孳
-骧
-遁
-姜
-颦
-荪
-滚
-萦
-伪
-逸
-粳
-爬
-锁
-矣
-役
-趣
-洒
-颔
-诏
-逐
-奸
-甭
-惠
-攀
-蹄
-泛
-尼
-拼
-阮
-鹰
-亚
-颈
-惑
-勒
-〉
-际
-肛
-爷
-刚
-钨
-丰
-养
-冶
-鲽
-辉
-蔻
-画
-覆
-皴
-妊
-麦
-返
-醉
-皂
-擀
-〗
-酶
-凑
-粹
-悟
-诀
-硖
-港
-卜
-z
-杀
-涕
-±
-舍
-铠
-抵
-弛
-段
-敝
-镐
-奠
-拂
-轴
-跛
-袱
-e
-t
-沉
-菇
-俎
-薪
-峦
-秭
-蟹
-历
-盟
-菠
-寡
-液
-肢
-喻
-染
-裱
-悱
-抱
-氙
-赤
-捅
-猛
-跑
-氮
-谣
-仁
-尺
-辊
-窍
-烙
-衍
-架
-擦
-倏
-璐
-瑁
-币
-楞
-胖
-夔
-趸
-邛
-惴
-饕
-虔
-蝎
-§
-哉
-贝
-宽
-辫
-炮
-扩
-饲
-籽
-魏
-菟
-锰
-伍
-猝
-末
-琳
-哚
-蛎
-邂
-呀
-姿
-鄞
-却
-歧
-仙
-恸
-椐
-森
-牒
-寤
-袒
-婆
-虢
-雅
-钉
-朵
-贼
-欲
-苞
-寰
-故
-龚
-坭
-嘘
-咫
-礼
-硷
-兀
-睢
-汶
-’
-铲
-烧
-绕
-诃
-浃
-钿
-哺
-柜
-讼
-颊
-璁
-腔
-洽
-咐
-脲
-簌
-筠
-镣
-玮
-鞠
-谁
-兼
-姆
-挥
-梯
-蝴
-谘
-漕
-刷
-躏
-宦
-弼
-b
-垌
-劈
-麟
-莉
-揭
-笙
-渎
-仕
-嗤
-仓
-配
-怏
-抬
-错
-泯
-镊
-孰
-猿
-邪
-仍
-秋
-鼬
-壹
-歇
-吵
-炼
-<
-尧
-射
-柬
-廷
-胧
-霾
-凳
-隋
-肚
-浮
-梦
-祥
-株
-堵
-退
-L
-鹫
-跎
-凶
-毽
-荟
-炫
-栩
-玳
-甜
-沂
-鹿
-顽
-伯
-爹
-赔
-蛴
-徐
-匡
-欣
-狰
-缸
-雹
-蟆
-疤
-默
-沤
-啜
-痂
-衣
-禅
-w
-i
-h
-辽
-葳
-黝
-钗
-停
-沽
-棒
-馨
-颌
-肉
-吴
-硫
-悯
-劾
-娈
-马
-啧
-吊
-悌
-镑
-峭
-帆
-瀣
-涉
-咸
-疸
-滋
-泣
-翦
-拙
-癸
-钥
-蜒
-+
-尾
-庄
-凝
-泉
-婢
-渴
-谊
-乞
-陆
-锉
-糊
-鸦
-淮
-I
-B
-N
-晦
-弗
-乔
-庥
-葡
-尻
-席
-橡
-傣
-渣
-拿
-惩
-麋
-斛
-缃
-矮
-蛏
-岘
-鸽
-姐
-膏
-催
-奔
-镒
-喱
-蠡
-摧
-钯
-胤
-柠
-拐
-璋
-鸥
-卢
-荡
-倾
-^
-_
-珀
-逄
-萧
-塾
-掇
-贮
-笆
-聂
-圃
-冲
-嵬
-M
-滔
-笕
-值
-炙
-偶
-蜱
-搐
-梆
-汪
-蔬
-腑
-鸯
-蹇
-敞
-绯
-仨
-祯
-谆
-梧
-糗
-鑫
-啸
-豺
-囹
-猾
-巢
-柄
-瀛
-筑
-踌
-沭
-暗
-苁
-鱿
-蹉
-脂
-蘖
-牢
-热
-木
-吸
-溃
-宠
-序
-泞
-偿
-拜
-檩
-厚
-朐
-毗
-螳
-吞
-媚
-朽
-担
-蝗
-橘
-畴
-祈
-糟
-盱
-隼
-郜
-惜
-珠
-裨
-铵
-焙
-琚
-唯
-咚
-噪
-骊
-丫
-滢
-勤
-棉
-呸
-咣
-淀
-隔
-蕾
-窈
-饨
-挨
-煅
-短
-匙
-粕
-镜
-赣
-撕
-墩
-酬
-馁
-豌
-颐
-抗
-酣
-氓
-佑
-搁
-哭
-递
-耷
-涡
-桃
-贻
-碣
-截
-瘦
-昭
-镌
-蔓
-氚
-甲
-猕
-蕴
-蓬
-散
-拾
-纛
-狼
-猷
-铎
-埋
-旖
-矾
-讳
-囊
-糜
-迈
-粟
-蚂
-紧
-鲳
-瘢
-栽
-稼
-羊
-锄
-斟
-睁
-桥
-瓮
-蹙
-祉
-醺
-鼻
-昱
-剃
-跳
-篱
-跷
-蒜
-翎
-宅
-晖
-嗑
-壑
-峻
-癫
-屏
-狠
-陋
-袜
-途
-憎
-祀
-莹
-滟
-佶
-溥
-臣
-约
-盛
-峰
-磁
-慵
-婪
-拦
-莅
-朕
-鹦
-粲
-裤
-哎
-疡
-嫖
-琵
-窟
-堪
-谛
-嘉
-儡
-鳝
-斩
-郾
-驸
-酊
-妄
-胜
-贺
-徙
-傅
-噌
-钢
-栅
-庇
-恋
-匝
-巯
-邈
-尸
-锚
-粗
-佟
-蛟
-薹
-纵
-蚊
-郅
-绢
-锐
-苗
-俞
-篆
-淆
-膀
-鲜
-煎
-诶
-秽
-寻
-涮
-刺
-怀
-噶
-巨
-褰
-魅
-灶
-灌
-桉
-藕
-谜
-舸
-薄
-搀
-恽
-借
-牯
-痉
-渥
-愿
-亓
-耘
-杠
-柩
-锔
-蚶
-钣
-珈
-喘
-蹒
-幽
-赐
-稗
-晤
-莱
-泔
-扯
-肯
-菪
-裆
-腩
-豉
-疆
-骜
-腐
-倭
-珏
-唔
-粮
-亡
-润
-慰
-伽
-橄
-玄
-誉
-醐
-胆
-龊
-粼
-塬
-陇
-彼
-削
-嗣
-绾
-芽
-妗
-垭
-瘴
-爽
-薏
-寨
-龈
-泠
-弹
-赢
-漪
-猫
-嘧
-涂
-恤
-圭
-茧
-烽
-屑
-痕
-巾
-赖
-荸
-凰
-腮
-畈
-亵
-蹲
-偃
-苇
-澜
-艮
-换
-骺
-烘
-苕
-梓
-颉
-肇
-哗
-悄
-氤
-涠
-葬
-屠
-鹭
-植
-竺
-佯
-诣
-鲇
-瘀
-鲅
-邦
-移
-滁
-冯
-耕
-癔
-戌
-茬
-沁
-巩
-悠
-湘
-洪
-痹
-锟
-循
-谋
-腕
-鳃
-钠
-捞
-焉
-迎
-碱
-伫
-急
-榷
-奈
-邝
-卯
-辄
-皲
-卟
-醛
-畹
-忧
-稳
-雄
-昼
-缩
-阈
-睑
-扌
-耗
-曦
-涅
-捏
-瞧
-邕
-淖
-漉
-铝
-耦
-禹
-湛
-喽
-莼
-琅
-诸
-苎
-纂
-硅
-始
-嗨
-傥
-燃
-臂
-赅
-嘈
-呆
-贵
-屹
-壮
-肋
-亍
-蚀
-卅
-豹
-腆
-邬
-迭
-浊
-}
-童
-螂
-捐
-圩
-勐
-触
-寞
-汊
-壤
-荫
-膺
-渌
-芳
-懿
-遴
-螈
-泰
-蓼
-蛤
-茜
-舅
-枫
-朔
-膝
-眙
-避
-梅
-判
-鹜
-璜
-牍
-缅
-垫
-藻
-黔
-侥
-惚
-懂
-踩
-腰
-腈
-札
-丞
-唾
-慈
-顿
-摹
-荻
-琬
-~
-斧
-沈
-滂
-胁
-胀
-幄
-莜
-Z
-匀
-鄄
-掌
-绰
-茎
-焚
-赋
-萱
-谑
-汁
-铒
-瞎
-夺
-蜗
-野
-娆
-冀
-弯
-篁
-懵
-灞
-隽
-芡
-脘
-俐
-辩
-芯
-掺
-喏
-膈
-蝈
-觐
-悚
-踹
-蔗
-熠
-鼠
-呵
-抓
-橼
-峨
-畜
-缔
-禾
-崭
-弃
-熊
-摒
-凸
-拗
-穹
-蒙
-抒
-祛
-劝
-闫
-扳
-阵
-醌
-踪
-喵
-侣
-搬
-仅
-荧
-赎
-蝾
-琦
-买
-婧
-瞄
-寓
-皎
-冻
-赝
-箩
-莫
-瞰
-郊
-笫
-姝
-筒
-枪
-遣
-煸
-袋
-舆
-痱
-涛
-母
-〇
-启
-践
-耙
-绲
-盘
-遂
-昊
-搞
-槿
-诬
-纰
-泓
-惨
-檬
-亻
-越
-C
-o
-憩
-熵
-祷
-钒
-暧
-塔
-阗
-胰
-咄
-娶
-魔
-琶
-钞
-邻
-扬
-杉
-殴
-咽
-弓
-〆
-髻
-】
-吭
-揽
-霆
-拄
-殖
-脆
-彻
-岩
-芝
-勃
-辣
-剌
-钝
-嘎
-甄
-佘
-皖
-伦
-授
-徕
-憔
-挪
-皇
-庞
-稔
-芜
-踏
-溴
-兖
-卒
-擢
-饥
-鳞
-煲
-‰
-账
-颗
-叻
-斯
-捧
-鳍
-琮
-讹
-蛙
-纽
-谭
-酸
-兔
-莒
-睇
-伟
-觑
-羲
-嗜
-宜
-褐
-旎
-辛
-卦
-诘
-筋
-鎏
-溪
-挛
-熔
-阜
-晰
-鳅
-丢
-奚
-灸
-呱
-献
-陉
-黛
-鸪
-甾
-萨
-疮
-拯
-洲
-疹
-辑
-叙
-恻
-谒
-允
-柔
-烂
-氏
-逅
-漆
-拎
-惋
-扈
-湟
-纭
-啕
-掬
-擞
-哥
-忽
-涤
-鸵
-靡
-郗
-瓷
-扁
-廊
-怨
-雏
-钮
-敦
-E
-懦
-憋
-汀
-拚
-啉
-腌
-岸
-f
-痼
-瞅
-尊
-咀
-眩
-飙
-忌
-仝
-迦
-熬
-毫
-胯
-篑
-茄
-腺
-凄
-舛
-碴
-锵
-诧
-羯
-後
-漏
-汤
-宓
-仞
-蚁
-壶
-谰
-皑
-铄
-棰
-罔
-辅
-晶
-苦
-牟
-闽
-\
-烃
-饮
-聿
-丙
-蛳
-朱
-煤
-涔
-鳖
-犁
-罐
-荼
-砒
-淦
-妤
-黏
-戎
-孑
-婕
-瑾
-戢
-钵
-枣
-捋
-砥
-衩
-狙
-桠
-稣
-阎
-肃
-梏
-诫
-孪
-昶
-婊
-衫
-嗔
-侃
-塞
-蜃
-樵
-峒
-貌
-屿
-欺
-缫
-阐
-栖
-诟
-珞
-荭
-吝
-萍
-嗽
-恂
-啻
-蜴
-磬
-峋
-俸
-豫
-谎
-徊
-镍
-韬
-魇
-晴
-U
-囟
-猜
-蛮
-坐
-囿
-伴
-亭
-肝
-佗
-蝠
-妃
-胞
-滩
-榴
-氖
-垩
-苋
-砣
-扪
-馏
-姓
-轩
-厉
-夥
-侈
-禀
-垒
-岑
-赏
-钛
-辐
-痔
-披
-纸
-碳
-“
-坞
-蠓
-挤
-荥
-沅
-悔
-铧
-帼
-蒌
-蝇
-a
-p
-y
-n
-g
-哀
-浆
-瑶
-凿
-桶
-馈
-皮
-奴
-苜
-佤
-伶
-晗
-铱
-炬
-优
-弊
-氢
-恃
-甫
-攥
-端
-锌
-灰
-稹
-炝
-曙
-邋
-亥
-眶
-碾
-拉
-萝
-绔
-捷
-浍
-腋
-姑
-菖
-凌
-涞
-麽
-锢
-桨
-潢
-绎
-镰
-殆
-锑
-渝
-铬
-困
-绽
-觎
-匈
-糙
-暑
-裹
-鸟
-盔
-肽
-迷
-綦
-『
-亳
-佝
-俘
-钴
-觇
-骥
-仆
-疝
-跪
-婶
-郯
-瀹
-唉
-脖
-踞
-针
-晾
-忒
-扼
-瞩
-叛
-椒
-疟
-嗡
-邗
-肆
-跆
-玫
-忡
-捣
-咧
-唆
-艄
-蘑
-潦
-笛
-阚
-沸
-泻
-掊
-菽
-贫
-斥
-髂
-孢
-镂
-赂
-麝
-鸾
-屡
-衬
-苷
-恪
-叠
-希
-粤
-爻
-喝
-茫
-惬
-郸
-绻
-庸
-撅
-碟
-宄
-妹
-膛
-叮
-饵
-崛
-嗲
-椅
-冤
-搅
-咕
-敛
-尹
-垦
-闷
-蝉
-霎
-勰
-败
-蓑
-泸
-肤
-鹌
-幌
-焦
-浠
-鞍
-刁
-舰
-乙
-竿
-裔
-。
-茵
-函
-伊
-兄
-丨
-娜
-匍
-謇
-莪
-宥
-似
-蝽
-翳
-酪
-翠
-粑
-薇
-祢
-骏
-赠
-叫
-Q
-噤
-噻
-竖
-芗
-莠
-潭
-俊
-羿
-耜
-O
-郫
-趁
-嗪
-囚
-蹶
-芒
-洁
-笋
-鹑
-敲
-硝
-啶
-堡
-渲
-揩
-』
-携
-宿
-遒
-颍
-扭
-棱
-割
-萜
-蔸
-葵
-琴
-捂
-饰
-衙
-耿
-掠
-募
-岂
-窖
-涟
-蔺
-瘤
-柞
-瞪
-怜
-匹
-距
-楔
-炜
-哆
-秦
-缎
-幼
-茁
-绪
-痨
-恨
-楸
-娅
-瓦
-桩
-雪
-嬴
-伏
-榔
-妥
-铿
-拌
-眠
-雍
-缇
-‘
-卓
-搓
-哌
-觞
-噩
-屈
-哧
-髓
-咦
-巅
-娑
-侑
-淫
-膳
-祝
-勾
-姊
-莴
-胄
-疃
-薛
-蜷
-胛
-巷
-芙
-芋
-熙
-闰
-勿
-窃
-狱
-剩
-钏
-幢
-陟
-铛
-慧
-靴
-耍
-k
-浙
-浇
-飨
-惟
-绗
-祜
-澈
-啼
-咪
-磷
-摞
-诅
-郦
-抹
-跃
-壬
-吕
-肖
-琏
-颤
-尴
-剡
-抠
-凋
-赚
-泊
-津
-宕
-殷
-倔
-氲
-漫
-邺
-涎
-怠
-$
-垮
-荬
-遵
-俏
-叹
-噢
-饽
-蜘
-孙
-筵
-疼
-鞭
-羧
-牦
-箭
-潴
-c
-眸
-祭
-髯
-啖
-坳
-愁
-芩
-驮
-倡
-巽
-穰
-沃
-胚
-怒
-凤
-槛
-剂
-趵
-嫁
-v
-邢
-灯
-鄢
-桐
-睽
-檗
-锯
-槟
-婷
-嵋
-圻
-诗
-蕈
-颠
-遭
-痢
-芸
-怯
-馥
-竭
-锗
-徜
-恭
-遍
-籁
-剑
-嘱
-苡
-龄
-僧
-桑
-潸
-弘
-澶
-楹
-悲
-讫
-愤
-腥
-悸
-谍
-椹
-呢
-桓
-葭
-攫
-阀
-翰
-躲
-敖
-柑
-郎
-笨
-橇
-呃
-魁
-燎
-脓
-葩
-磋
-垛
-玺
-狮
-沓
-砜
-蕊
-锺
-罹
-蕉
-翱
-虐
-闾
-巫
-旦
-茱
-嬷
-枯
-鹏
-贡
-芹
-汛
-矫
-绁
-拣
-禺
-佃
-讣
-舫
-惯
-乳
-趋
-疲
-挽
-岚
-虾
-衾
-蠹
-蹂
-飓
-氦
-铖
-孩
-稞
-瑜
-壅
-掀
-勘
-妓
-畅
-髋
-W
-庐
-牲
-蓿
-榕
-练
-垣
-唱
-邸
-菲
-昆
-婺
-穿
-绡
-麒
-蚱
-掂
-愚
-泷
-涪
-漳
-妩
-娉
-榄
-讷
-觅
-旧
-藤
-煮
-呛
-柳
-腓
-叭
-庵
-烷
-阡
-罂
-蜕
-擂
-猖
-咿
-媲
-脉
-【
-沏
-貅
-黠
-熏
-哲
-烁
-坦
-酵
-兜
-×
-潇
-撒
-剽
-珩
-圹
-乾
-摸
-樟
-帽
-嗒
-襄
-魂
-轿
-憬
-锡
-〕
-喃
-皆
-咖
-隅
-脸
-残
-泮
-袂
-鹂
-珊
-囤
-捆
-咤
-误
-徨
-闹
-淙
-芊
-淋
-怆
-囗
-拨
-梳
-渤
-R
-G
-绨
-蚓
-婀
-幡
-狩
-麾
-谢
-唢
-裸
-旌
-伉
-纶
-裂
-驳
-砼
-咛
-澄
-樨
-蹈
-宙
-澍
-倍
-貔
-操
-勇
-蟠
-摈
-砧
-虬
-够
-缁
-悦
-藿
-撸
-艹
-摁
-淹
-豇
-虎
-榭
-ˉ
-吱
-d
-°
-喧
-荀
-踱
-侮
-奋
-偕
-饷
-犍
-惮
-坑
-璎
-徘
-宛
-妆
-袈
-倩
-窦
-昂
-荏
-乖
-K
-怅
-撰
-鳙
-牙
-袁
-酞
-X
-痿
-琼
-闸
-雁
-趾
-荚
-虻
-涝
-《
-杏
-韭
-偈
-烤
-绫
-鞘
-卉
-症
-遢
-蓥
-诋
-杭
-荨
-匆
-竣
-簪
-辙
-敕
-虞
-丹
-缭
-咩
-黟
-m
-淤
-瑕
-咂
-铉
-硼
-茨
-嶂
-痒
-畸
-敬
-涿
-粪
-窘
-熟
-叔
-嫔
-盾
-忱
-裘
-憾
-梵
-赡
-珙
-咯
-娘
-庙
-溯
-胺
-葱
-痪
-摊
-荷
-卞
-乒
-髦
-寐
-铭
-坩
-胗
-枷
-爆
-溟
-嚼
-羚
-砬
-轨
-惊
-挠
-罄
-竽
-菏
-氧
-浅
-楣
-盼
-枢
-炸
-阆
-杯
-谏
-噬
-淇
-渺
-俪
-秆
-墓
-泪
-跻
-砌
-痰
-垡
-渡
-耽
-釜
-讶
-鳎
-煞
-呗
-韶
-舶
-绷
-鹳
-缜
-旷
-铊
-皱
-龌
-檀
-霖
-奄
-槐
-艳
-蝶
-旋
-哝
-赶
-骞
-蚧
-腊
-盈
-丁
-`
-蜚
-矸
-蝙
-睨
-嚓
-僻
-鬼
-醴
-夜
-彝
-磊
-笔
-拔
-栀
-糕
-厦
-邰
-纫
-逭
-纤
-眦
-膊
-馍
-躇
-烯
-蘼
-冬
-诤
-暄
-骶
-哑
-瘠
-」
-臊
-丕
-愈
-咱
-螺
-擅
-跋
-搏
-硪
-谄
-笠
-淡
-嘿
-骅
-谧
-鼎
-皋
-姚
-歼
-蠢
-驼
-耳
-胬
-挝
-涯
-狗
-蒽
-孓
-犷
-凉
-芦
-箴
-铤
-孤
-嘛
-坤
-V
-茴
-朦
-挞
-尖
-橙
-诞
-搴
-碇
-洵
-浚
-帚
-蜍
-漯
-柘
-嚎
-讽
-芭
-荤
-咻
-祠
-秉
-跖
-埃
-吓
-糯
-眷
-馒
-惹
-娼
-鲑
-嫩
-讴
-轮
-瞥
-靶
-褚
-乏
-缤
-宋
-帧
-删
-驱
-碎
-扑
-俩
-俄
-偏
-涣
-竹
-噱
-皙
-佰
-渚
-唧
-斡
-#
-镉
-刀
-崎
-筐
-佣
-夭
-贰
-肴
-峙
-哔
-艿
-匐
-牺
-镛
-缘
-仡
-嫡
-劣
-枸
-堀
-梨
-簿
-鸭
-蒸
-亦
-稽
-浴
-{
-衢
-束
-槲
-j
-阁
-揍
-疥
-棋
-潋
-聪
-窜
-乓
-睛
-插
-冉
-阪
-苍
-搽
-「
-蟾
-螟
-幸
-仇
-樽
-撂
-慢
-跤
-幔
-俚
-淅
-覃
-觊
-溶
-妖
-帛
-侨
-曰
-妾
-泗
-·
-:
-瀘
-風
-Ë
-(
-)
-∶
-紅
-紗
-瑭
-雲
-頭
-鶏
-財
-許
-•
-¥
-樂
-焗
-麗
-—
-;
-滙
-東
-榮
-繪
-興
-…
-門
-業
-π
-楊
-國
-顧
-é
-盤
-寳
-Λ
-龍
-鳳
-島
-誌
-緣
-結
-銭
-萬
-勝
-祎
-璟
-優
-歡
-臨
-時
-購
-=
-★
-藍
-昇
-鐵
-觀
-勅
-農
-聲
-畫
-兿
-術
-發
-劉
-記
-專
-耑
-園
-書
-壴
-種
-Ο
-●
-褀
-號
-銀
-匯
-敟
-锘
-葉
-橪
-廣
-進
-蒄
-鑽
-阝
-祙
-貢
-鍋
-豊
-夬
-喆
-團
-閣
-開
-燁
-賓
-館
-酡
-沔
-順
-+
-硚
-劵
-饸
-陽
-車
-湓
-復
-萊
-氣
-軒
-華
-堃
-迮
-纟
-戶
-馬
-學
-裡
-電
-嶽
-獨
-マ
-シ
-サ
-ジ
-燘
-袪
-環
-❤
-臺
-灣
-専
-賣
-孖
-聖
-攝
-線
-▪
-α
-傢
-俬
-夢
-達
-莊
-喬
-貝
-薩
-劍
-羅
-壓
-棛
-饦
-尃
-璈
-囍
-醫
-G
-I
-A
-#
-N
-鷄
-髙
-嬰
-啓
-約
-隹
-潔
-賴
-藝
-~
-寶
-籣
-麺
-
-嶺
-√
-義
-網
-峩
-長
-∧
-魚
-機
-構
-②
-鳯
-偉
-L
-B
-㙟
-畵
-鴿
-'
-詩
-溝
-嚞
-屌
-藔
-佧
-玥
-蘭
-織
-1
-3
-9
-0
-7
-點
-砭
-鴨
-鋪
-銘
-廳
-弍
-‧
-創
-湯
-坶
-℃
-卩
-骝
-&
-烜
-荘
-當
-潤
-扞
-係
-懷
-碶
-钅
-蚨
-讠
-☆
-叢
-爲
-埗
-涫
-塗
-→
-楽
-現
-鯨
-愛
-瑪
-鈺
-忄
-悶
-藥
-飾
-樓
-視
-孬
-ㆍ
-燚
-苪
-師
-①
-丼
-锽
-│
-韓
-標
-è
-兒
-閏
-匋
-張
-漢
-Ü
-髪
-會
-閑
-檔
-習
-裝
-の
-峯
-菘
-輝
-И
-雞
-釣
-億
-浐
-K
-O
-R
-8
-H
-E
-P
-T
-W
-D
-S
-C
-M
-F
-姌
-饹
-»
-晞
-廰
-ä
-嵯
-鷹
-負
-飲
-絲
-冚
-楗
-澤
-綫
-區
-❋
-←
-質
-靑
-揚
-③
-滬
-統
-産
-協
-﹑
-乸
-畐
-經
-運
-際
-洺
-岽
-為
-粵
-諾
-崋
-豐
-碁
-ɔ
-V
-2
-6
-齋
-誠
-訂
-´
-勑
-雙
-陳
-無
-í
-泩
-媄
-夌
-刂
-i
-c
-t
-o
-r
-a
-嘢
-耄
-燴
-暃
-壽
-媽
-靈
-抻
-體
-唻
-É
-冮
-甹
-鎮
-錦
-ʌ
-蜛
-蠄
-尓
-駕
-戀
-飬
-逹
-倫
-貴
-極
-Я
-Й
-寬
-磚
-嶪
-郎
-職
-|
-間
-n
-d
-剎
-伈
-課
-飛
-橋
-瘊
-№
-譜
-骓
-圗
-滘
-縣
-粿
-咅
-養
-濤
-彳
-®
-%
-Ⅱ
-啰
-㴪
-見
-矞
-薬
-糁
-邨
-鲮
-顔
-罱
-З
-選
-話
-贏
-氪
-俵
-競
-瑩
-繡
-枱
-β
-綉
-á
-獅
-爾
-™
-麵
-戋
-淩
-徳
-個
-劇
-場
-務
-簡
-寵
-h
-實
-膠
-轱
-圖
-築
-嘣
-樹
-㸃
-營
-耵
-孫
-饃
-鄺
-飯
-麯
-遠
-輸
-坫
-孃
-乚
-閃
-鏢
-㎡
-題
-廠
-關
-↑
-爺
-將
-軍
-連
-篦
-覌
-參
-箸
--
-窠
-棽
-寕
-夀
-爰
-歐
-呙
-閥
-頡
-熱
-雎
-垟
-裟
-凬
-勁
-帑
-馕
-夆
-疌
-枼
-馮
-貨
-蒤
-樸
-彧
-旸
-靜
-龢
-暢
-㐱
-鳥
-珺
-鏡
-灡
-爭
-堷
-廚
-Ó
-騰
-診
-┅
-蘇
-褔
-凱
-頂
-豕
-亞
-帥
-嘬
-⊥
-仺
-桖
-複
-饣
-絡
-穂
-顏
-棟
-納
-▏
-濟
-親
-設
-計
-攵
-埌
-烺
-ò
-頤
-燦
-蓮
-撻
-節
-講
-濱
-濃
-娽
-洳
-朿
-燈
-鈴
-護
-膚
-铔
-過
-補
-Z
-U
-5
-4
-坋
-闿
-䖝
-餘
-缐
-铞
-貿
-铪
-桼
-趙
-鍊
-[
-㐂
-垚
-菓
-揸
-捲
-鐘
-滏
-𣇉
-爍
-輪
-燜
-鴻
-鮮
-動
-鹞
-鷗
-丄
-慶
-鉌
-翥
-飮
-腸
-⇋
-漁
-覺
-來
-熘
-昴
-翏
-鲱
-圧
-鄉
-萭
-頔
-爐
-嫚
-г
-貭
-類
-聯
-幛
-輕
-訓
-鑒
-夋
-锨
-芃
-珣
-䝉
-扙
-嵐
-銷
-處
-ㄱ
-語
-誘
-苝
-歸
-儀
-燒
-楿
-內
-粢
-葒
-奧
-麥
-礻
-滿
-蠔
-穵
-瞭
-態
-鱬
-榞
-硂
-鄭
-黃
-煙
-祐
-奓
-逺
-*
-瑄
-獲
-聞
-薦
-讀
-這
-樣
-決
-問
-啟
-們
-執
-説
-轉
-單
-隨
-唘
-帶
-倉
-庫
-還
-贈
-尙
-皺
-■
-餅
-產
-○
-∈
-報
-狀
-楓
-賠
-琯
-嗮
-禮
-`
-傳
->
-≤
-嗞
-Φ
-≥
-換
-咭
-∣
-↓
-曬
-ε
-応
-寫
-″
-終
-様
-純
-費
-療
-聨
-凍
-壐
-郵
-ü
-黒
-∫
-製
-塊
-調
-軽
-確
-撃
-級
-馴
-Ⅲ
-涇
-繹
-數
-碼
-證
-狒
-処
-劑
-<
-晧
-賀
-衆
-]
-櫥
-兩
-陰
-絶
-對
-鯉
-憶
-◎
-p
-e
-Y
-蕒
-煖
-頓
-測
-試
-鼽
-僑
-碩
-妝
-帯
-≈
-鐡
-舖
-權
-喫
-倆
-ˋ
-該
-悅
-ā
-俫
-.
-f
-s
-b
-m
-k
-g
-u
-j
-貼
-淨
-濕
-針
-適
-備
-l
-/
-給
-謢
-強
-觸
-衛
-與
-⊙
-$
-緯
-變
-⑴
-⑵
-⑶
-㎏
-殺
-∩
-幚
-─
-價
-▲
-離
-ú
-ó
-飄
-烏
-関
-閟
-﹝
-﹞
-邏
-輯
-鍵
-驗
-訣
-導
-歷
-屆
-層
-▼
-儱
-錄
-熳
-ē
-艦
-吋
-錶
-辧
-飼
-顯
-④
-禦
-販
-気
-対
-枰
-閩
-紀
-幹
-瞓
-貊
-淚
-△
-眞
-墊
-Ω
-獻
-褲
-縫
-緑
-亜
-鉅
-餠
-{
-}
-◆
-蘆
-薈
-█
-◇
-溫
-彈
-晳
-粧
-犸
-穩
-訊
-崬
-凖
-熥
-П
-舊
-條
-紋
-圍
-Ⅳ
-筆
-尷
-難
-雜
-錯
-綁
-識
-頰
-鎖
-艶
-□
-殁
-殼
-⑧
-├
-▕
-鵬
-ǐ
-ō
-ǒ
-糝
-綱
-▎
-μ
-盜
-饅
-醬
-籤
-蓋
-釀
-鹽
-據
-à
-ɡ
-辦
-◥
-彐
-┌
-婦
-獸
-鲩
-伱
-ī
-蒟
-蒻
-齊
-袆
-腦
-寧
-凈
-妳
-煥
-詢
-偽
-謹
-啫
-鯽
-騷
-鱸
-損
-傷
-鎻
-髮
-買
-冏
-儥
-両
-﹢
-∞
-載
-喰
-z
-羙
-悵
-燙
-曉
-員
-組
-徹
-艷
-痠
-鋼
-鼙
-縮
-細
-嚒
-爯
-≠
-維
-"
-鱻
-壇
-厍
-帰
-浥
-犇
-薡
-軎
-²
-應
-醜
-刪
-緻
-鶴
-賜
-噁
-軌
-尨
-镔
-鷺
-槗
-彌
-葚
-濛
-請
-溇
-緹
-賢
-訪
-獴
-瑅
-資
-縤
-陣
-蕟
-栢
-韻
-祼
-恁
-伢
-謝
-劃
-涑
-總
-衖
-踺
-砋
-凉
-籃
-駿
-苼
-瘋
-昽
-紡
-驊
-腎
-﹗
-響
-杋
-剛
-嚴
-禪
-歓
-槍
-傘
-檸
-檫
-炣
-勢
-鏜
-鎢
-銑
-尐
-減
-奪
-惡
-θ
-僮
-婭
-臘
-ū
-ì
-殻
-鉄
-∑
-蛲
-焼
-緖
-續
-紹
-懮
\ No newline at end of file
diff --git a/godo/office/LICENSE b/godo/office/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/godo/office/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/godo/office/Readme.md b/godo/office/Readme.md
new file mode 100644
index 0000000..4a2dff8
--- /dev/null
+++ b/godo/office/Readme.md
@@ -0,0 +1,75 @@
+# 📄 Gh0ffice (Office/PDF File Parser)
+
+## Modifications
+- 2024-12-08 godoos: add support for odt/epub/xml/rtf/md/txt/html/json files
+
+This Go-based project provides a robust parser for various office document formats, including DOCX/DOC, PPTX/PPT, XLSX/XLS, and PDF. The parser extracts both content and metadata from these file types, allowing easy access to structured document data for further processing or analysis.
+
+## 🛠 Features
+
+- **Metadata Extraction**: Captures essential metadata such as title, author, keywords, and modification dates.
+- **Content Parsing**: Supports extraction of text content from multiple file formats.
+- **Extensible Architecture**: Easily add support for new file formats by implementing additional reader functions.
+
+## 📂 Supported Formats
+
+- **DOCX**: Extracts text content from Word documents.
+- **PPTX**: Extracts text content from PowerPoint presentations.
+- **XLSX**: Extracts data from Excel spreadsheets.
+- **DOC**: Extracts text content from legacy Word documents.
+- **PPT**: Extracts text content from legacy PowerPoint presentations.
+- **XLS**: Extracts data from legacy Excel spreadsheets.
+- **PDF**: Extracts text content from PDF files (note that some complex PDFs may not be fully supported).
+
+## 📖 Installation
+
+To use this project, ensure you have Go installed on your system. Clone this repository and run the following command to install the dependencies:
+
+```bash
+go mod tidy
+```
+
+## 🚀 Usage
+
+### Basic Usage
+
+You can inspect a document and extract its content and metadata by calling the `InspectDocument` function with the file path as follows:
+
+```go
+doc, err := gh0ffice.InspectDocument("path/to/your/file.docx")
+if err != nil {
+ log.Fatalf("Error reading document: %s", err)
+}
+fmt.Printf("Title: %s\n", doc.Title)
+fmt.Printf("Content: %s\n", doc.Content)
+```
+
+### Debugging
+
+Set the `DEBUG` constant to `true` to enable logging for more verbose output during the parsing process:
+
+```go
+const DEBUG bool = true
+```
+
+## ⚠️ Limitations
+
+- The PDF parsing may fail on certain complex or malformed documents.
+- Only straightforward text extraction is performed; formatting and images are not considered.
+- Compatibility tested primarily on major office file formats.
+
+## 📝 License
+
+This project is licensed under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for more details.
+
+## 📬 Contributing
+
+Contributions are welcome! Please feel free to create issues or submit pull requests for new features or bug fixes.
+
+## 👥 Author
+
+This project is maintained by the team and community of YT-Gh0st. Contributions and engagement are always welcome!
+
+---
+
+For any questions or suggestions, feel free to reach out. Happy parsing! 😊
diff --git a/godo/office/darwin.go b/godo/office/darwin.go
new file mode 100644
index 0000000..41eb5da
--- /dev/null
+++ b/godo/office/darwin.go
@@ -0,0 +1,27 @@
+//go:build darwin
+// +build darwin
+
+package office
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
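+
+// getFileInfoData fills the Document's basic file metadata (name, title, size and the
+// creation/modification/access timestamps) from the on-disk file, using the
+// darwin-specific syscall.Stat_t fields.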
+func getFileInfoData(data *Document) (bool, error) {
+ fileinfo, err := os.Stat(data.path)
+ if err != nil {
+ return false, err
+ }
+ data.Filename = fileinfo.Name()
+ data.Title = data.Filename
+ data.Size = int(fileinfo.Size())
+
+ stat := fileinfo.Sys().(*syscall.Stat_t)
+ data.Createtime = time.Unix(stat.Birthtimespec.Sec, stat.Birthtimespec.Nsec)
+ data.Modifytime = time.Unix(stat.Mtimespec.Sec, stat.Mtimespec.Nsec)
+ data.Accesstime = time.Unix(stat.Atimespec.Sec, stat.Atimespec.Nsec)
+
+ return true, nil
+}
diff --git a/godo/office/doc.go b/godo/office/doc.go
new file mode 100644
index 0000000..9540aee
--- /dev/null
+++ b/godo/office/doc.go
@@ -0,0 +1,545 @@
+package office
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "unicode/utf16"
+ "unicode/utf8"
+
+ "github.com/mattetti/filebuffer"
+ "github.com/richardlehane/mscfb"
+)
+
+// ---- file doc.go ----
+// There were a few changes in this file to properly support Unicode, which the old code did not.
+
+var (
+ errTable = errors.New("cannot find table stream")
+ errDocEmpty = errors.New("WordDocument not found")
+ // errDocShort = errors.New("wordDoc block too short")
+ errInvalidArgument = errors.New("invalid table and/or fib")
+)
+
+type allReader interface {
+ io.Closer
+ io.ReaderAt
+ io.ReadSeeker
+}
+
+func wrapError(e error) error {
+ return errors.New("Error processing file: " + e.Error())
+}
+
+// DOC2Text converts a standard io.Reader from a Microsoft Word .doc binary file and returns a reader (actually a bytes.Buffer) which will output the plain text found in the .doc file
+func DOC2Text(r io.Reader) (io.Reader, error) {
+ ra, ok := r.(io.ReaderAt)
+ if !ok {
+ // r does not support ReadAt, so buffer the whole stream in memory first
+ // (assign to the outer ra instead of shadowing it with a new variable).
+ buf, _, err := toMemoryBuffer(r)
+ if err != nil {
+ return nil, wrapError(err)
+ }
+ defer buf.Close()
+ ra = buf
+ }
+
+ d, err := mscfb.New(ra)
+ if err != nil {
+ return nil, wrapError(err)
+ }
+
+ wordDoc, table0, table1 := getWordDocAndTables(d)
+ fib, err := getFib(wordDoc)
+ if err != nil {
+ return nil, wrapError(err)
+ }
+
+ table := getActiveTable(table0, table1, fib)
+ if table == nil {
+ return nil, wrapError(errTable)
+ }
+
+ clx, err := getClx(table, fib)
+ if err != nil {
+ return nil, wrapError(err)
+ }
+
+ return getText(wordDoc, clx)
+}
+
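+
+// toMemoryBuffer copies the full contents of r into an in-memory filebuffer so the
+// data can be re-read with ReadAt/Seek, returning the buffer and the number of bytes read.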
+func toMemoryBuffer(r io.Reader) (allReader, int64, error) {
+ var b bytes.Buffer
+ size, err := b.ReadFrom(r)
+ if err != nil {
+ return nil, 0, err
+ }
+ fb := filebuffer.New(b.Bytes())
+ return fb, size, nil
+}
+
+func getText(wordDoc *mscfb.File, clx *clx) (io.Reader, error) {
+ var buf bytes.Buffer
+ for i := 0; i < len(clx.pcdt.PlcPcd.aPcd); i++ {
+ pcd := clx.pcdt.PlcPcd.aPcd[i]
+ cp := clx.pcdt.PlcPcd.aCP[i]
+ cpNext := clx.pcdt.PlcPcd.aCP[i+1]
+
+ var start, end int
+ // https://msdn.microsoft.com/ko-kr/library/office/gg615596(v=office.14).aspx
+ // Read the value of the Pcd.Fc.fCompressed field at bit 46 of the current Pcd structure. If 0, the Pcd structure refers to a 16-bit Unicode character. If 1, it refers to an 8-bit ANSI character.
+ if pcd.fc.fCompressed {
+ start = pcd.fc.fc / 2
+ end = start + cpNext - cp
+ } else {
+ // -> 16-bit Unicode characters
+ start = pcd.fc.fc
+ end = start + 2*(cpNext-cp)
+ }
+
+ b := make([]byte, end-start)
+ _, err := wordDoc.ReadAt(b, int64(start)) // read all the characters
+ if err != nil {
+ return nil, err
+ }
+ translateText(b, &buf, pcd.fc.fCompressed)
+ }
+ return &buf, nil
+}
+
+// translateText translates the buffer into text. fCompressed = 0 for 16-bit Unicode, 1 = 8-bit ANSI characters.
+func translateText(b []byte, buf *bytes.Buffer, fCompressed bool) {
+ u16s := make([]uint16, 1)
+ b8buf := make([]byte, 4)
+
+ fieldLevel := 0
+ var isFieldChar bool
+ for cIndex := range b {
+ // Convert to rune
+ var char rune
+ if fCompressed {
+ // ANSI, 1 byte
+ char = rune(b[cIndex])
+ } else {
+ // 16-bit Unicode: skip every second byte
+ if cIndex%2 != 0 {
+ continue
+ } else if (cIndex + 1) >= len(b) { // make sure there are at least 2 bytes for Unicode decoding
+ continue
+ }
+
+ // convert from UTF16 to UTF8
+ u16s[0] = uint16(b[cIndex]) + (uint16(b[cIndex+1]) << 8)
+ r := utf16.Decode(u16s)
+ if len(r) != 1 {
+ //fmt.Printf("Invalid rune %v\n", r)
+ continue
+ }
+ char = r[0]
+ }
+
+ // Handle special field characters (section 2.8.25)
+ if char == 0x13 {
+ isFieldChar = true
+ fieldLevel++
+ continue
+ } else if char == 0x14 {
+ isFieldChar = false
+ continue
+ } else if char == 0x15 {
+ isFieldChar = false
+ continue
+ } else if isFieldChar {
+ continue
+ }
+
+ if char == 7 { // table column separator
+ buf.WriteByte(' ')
+ continue
+ } else if char < 32 && char != 9 && char != 10 && char != 13 { // skip non-printable ASCII characters
+ //buf.Write([]byte(fmt.Sprintf("|%#x|", char)))
+ continue
+ }
+
+ if fCompressed { // compressed, so replace compressed characters
+ buf.Write(replaceCompressed(byte(char)))
+ } else {
+ // encode the rune to UTF-8
+ n := utf8.EncodeRune(b8buf, char)
+ buf.Write(b8buf[:n])
+ }
+ }
+}
+
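+
+// replaceCompressed maps the Windows-1252-specific byte values in the 0x82-0x9F range to
+// their Unicode equivalents, returned as a little-endian UTF-16 code unit; any other
+// byte is passed through unchanged.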
+func replaceCompressed(char byte) []byte {
+ var v uint16
+ switch char {
+ case 0x82:
+ v = 0x201A
+ case 0x83:
+ v = 0x0192
+ case 0x84:
+ v = 0x201E
+ case 0x85:
+ v = 0x2026
+ case 0x86:
+ v = 0x2020
+ case 0x87:
+ v = 0x2021
+ case 0x88:
+ v = 0x02C6
+ case 0x89:
+ v = 0x2030
+ case 0x8A:
+ v = 0x0160
+ case 0x8B:
+ v = 0x2039
+ case 0x8C:
+ v = 0x0152
+ case 0x91:
+ v = 0x2018
+ case 0x92:
+ v = 0x2019
+ case 0x93:
+ v = 0x201C
+ case 0x94:
+ v = 0x201D
+ case 0x95:
+ v = 0x2022
+ case 0x96:
+ v = 0x2013
+ case 0x97:
+ v = 0x2014
+ case 0x98:
+ v = 0x02DC
+ case 0x99:
+ v = 0x2122
+ case 0x9A:
+ v = 0x0161
+ case 0x9B:
+ v = 0x203A
+ case 0x9C:
+ v = 0x0153
+ case 0x9F:
+ v = 0x0178
+ default:
+ return []byte{char}
+ }
+ out := make([]byte, 2)
+ binary.LittleEndian.PutUint16(out, v)
+ return out
+}
+
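+
+// getWordDocAndTables locates the "WordDocument" stream and the two possible table
+// streams ("0Table" and "1Table") inside the compound file.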
+func getWordDocAndTables(r *mscfb.Reader) (*mscfb.File, *mscfb.File, *mscfb.File) {
+ var wordDoc, table0, table1 *mscfb.File
+ for i := 0; i < len(r.File); i++ {
+ stream := r.File[i]
+
+ switch stream.Name {
+ case "WordDocument":
+ wordDoc = stream
+ case "0Table":
+ table0 = stream
+ case "1Table":
+ table1 = stream
+ }
+ }
+ return wordDoc, table0, table1
+}
+
+func getActiveTable(table0 *mscfb.File, table1 *mscfb.File, f *fib) *mscfb.File {
+ if f.base.fWhichTblStm == 0 {
+ return table0
+ }
+ return table1
+}
+
+// ---- file fib.go ----
+
+var (
+ errFibInvalid = errors.New("file information block validation failed")
+)
+
+type fib struct {
+ base fibBase
+ csw int
+ fibRgW fibRgW
+ cslw int
+ fibRgLw fibRgLw
+ cbRgFcLcb int
+ fibRgFcLcb fibRgFcLcb
+}
+
+type fibBase struct {
+ fWhichTblStm int
+}
+
+type fibRgW struct {
+}
+
+type fibRgLw struct {
+ ccpText int
+ ccpFtn int
+ ccpHdd int
+ ccpMcr int
+ ccpAtn int
+ ccpEdn int
+ ccpTxbx int
+ ccpHdrTxbx int
+ cpLength int
+}
+
+type fibRgFcLcb struct {
+ fcPlcfFldMom int
+ lcbPlcfFldMom int
+ fcPlcfFldHdr int
+ lcbPlcfFldHdr int
+ fcPlcfFldFtn int
+ lcbPlcfFldFtn int
+ fcPlcfFldAtn int
+ lcbPlcfFldAtn int
+ fcClx int
+ lcbClx int
+}
+
+// parse File Information Block (section 2.5.1)
+func getFib(wordDoc *mscfb.File) (*fib, error) {
+ if wordDoc == nil {
+ return nil, errDocEmpty
+ }
+
+ b := make([]byte, 898) // get FIB block up to FibRgFcLcb97
+ _, err := wordDoc.ReadAt(b, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ fibBase := getFibBase(b[0:32])
+
+ fibRgW, csw, err := getFibRgW(b, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ fibRgLw, cslw, err := getFibRgLw(b, 34+csw)
+ if err != nil {
+ return nil, err
+ }
+
+ fibRgFcLcb, cbRgFcLcb, err := getFibRgFcLcb(b, 34+csw+2+cslw)
+
+ return &fib{base: *fibBase, csw: csw, cslw: cslw, fibRgW: *fibRgW, fibRgLw: *fibRgLw, fibRgFcLcb: *fibRgFcLcb, cbRgFcLcb: cbRgFcLcb}, err
+}
+
+// parse FibBase (section 2.5.2)
+func getFibBase(fib []byte) *fibBase {
+ byt := fib[11] // fWhichTblStm is 2nd highest bit in this byte
+ fWhichTblStm := int(byt >> 1 & 1) // set which table (0Table or 1Table) is the table stream
+ return &fibBase{fWhichTblStm: fWhichTblStm}
+}
+
+func getFibRgW(fib []byte, start int) (*fibRgW, int, error) {
+ if start+2 >= len(fib) { // must be big enough for csw
+ return &fibRgW{}, 0, errFibInvalid
+ }
+
+ csw := int(binary.LittleEndian.Uint16(fib[start:start+2])) * 2 // in bytes
+ return &fibRgW{}, csw, nil
+}
+
+// parse FibRgLw (section 2.5.4)
+func getFibRgLw(fib []byte, start int) (*fibRgLw, int, error) {
+ fibRgLwStart := start + 2 // skip cslw
+ if fibRgLwStart+88 >= len(fib) { // expect 88 bytes in fibRgLw
+ return &fibRgLw{}, 0, errFibInvalid
+ }
+
+ cslw := getInt16(fib, start) * 4 // in bytes
+ ccpText := getInt(fib, fibRgLwStart+3*4)
+ ccpFtn := getInt(fib, fibRgLwStart+4*4)
+ ccpHdd := getInt(fib, fibRgLwStart+5*4)
+ ccpMcr := getInt(fib, fibRgLwStart+6*4)
+ ccpAtn := getInt(fib, fibRgLwStart+7*4)
+ ccpEdn := getInt(fib, fibRgLwStart+8*4)
+ ccpTxbx := getInt(fib, fibRgLwStart+9*4)
+ ccpHdrTxbx := getInt(fib, fibRgLwStart+10*4)
+
+ // calculate cpLength. Used in PlcPcd verification (see section 2.8.35)
+ var cpLength int
+ if ccpFtn != 0 || ccpHdd != 0 || ccpMcr != 0 || ccpAtn != 0 || ccpEdn != 0 || ccpTxbx != 0 || ccpHdrTxbx != 0 {
+ cpLength = ccpFtn + ccpHdd + ccpMcr + ccpAtn + ccpEdn + ccpTxbx + ccpHdrTxbx + ccpText + 1
+ } else {
+ cpLength = ccpText
+ }
+ return &fibRgLw{ccpText: ccpText, ccpFtn: ccpFtn, ccpHdd: ccpHdd, ccpMcr: ccpMcr, ccpAtn: ccpAtn,
+ ccpEdn: ccpEdn, ccpTxbx: ccpTxbx, ccpHdrTxbx: ccpHdrTxbx, cpLength: cpLength}, cslw, nil
+}
+
+// parse FibRgFcLcb (section 2.5.5)
+func getFibRgFcLcb(fib []byte, start int) (*fibRgFcLcb, int, error) {
+ fibRgFcLcbStart := start + 2 // skip cbRgFcLcb
+ if fibRgFcLcbStart+186*4 < len(fib) { // expect 186+ values in FibRgFcLcb
+ return &fibRgFcLcb{}, 0, errFibInvalid
+ }
+
+ cbRgFcLcb := getInt16(fib, start)
+ fcPlcfFldMom := getInt(fib, fibRgFcLcbStart+32*4)
+ lcbPlcfFldMom := getInt(fib, fibRgFcLcbStart+33*4)
+ fcPlcfFldHdr := getInt(fib, fibRgFcLcbStart+34*4)
+ lcbPlcfFldHdr := getInt(fib, fibRgFcLcbStart+35*4)
+ fcPlcfFldFtn := getInt(fib, fibRgFcLcbStart+36*4)
+ lcbPlcfFldFtn := getInt(fib, fibRgFcLcbStart+37*4)
+ fcPlcfFldAtn := getInt(fib, fibRgFcLcbStart+38*4)
+ lcbPlcfFldAtn := getInt(fib, fibRgFcLcbStart+39*4)
+ fcClx := getInt(fib, fibRgFcLcbStart+66*4)
+ lcbClx := getInt(fib, fibRgFcLcbStart+67*4)
+ return &fibRgFcLcb{fcPlcfFldMom: fcPlcfFldMom, lcbPlcfFldMom: lcbPlcfFldMom, fcPlcfFldHdr: fcPlcfFldHdr, lcbPlcfFldHdr: lcbPlcfFldHdr,
+ fcPlcfFldFtn: fcPlcfFldFtn, lcbPlcfFldFtn: lcbPlcfFldFtn, fcPlcfFldAtn: fcPlcfFldAtn, lcbPlcfFldAtn: lcbPlcfFldAtn,
+ fcClx: fcClx, lcbClx: lcbClx}, cbRgFcLcb, nil
+}
+
+func getInt16(buf []byte, start int) int {
+ return int(binary.LittleEndian.Uint16(buf[start : start+2]))
+}
+func getInt(buf []byte, start int) int {
+ return int(binary.LittleEndian.Uint32(buf[start : start+4]))
+}
+
+// ---- file clx.go ----
+
+var (
+ errInvalidPrc = errors.New("invalid Prc structure")
+ errInvalidClx = errors.New("expected last aCP value to equal fib.cpLength (2.8.35)")
+ errInvalidPcdt = errors.New("expected clxt to be equal 0x02")
+)
+
+type clx struct {
+ pcdt pcdt
+}
+
+type pcdt struct {
+ lcb int
+ PlcPcd plcPcd
+}
+
+type plcPcd struct {
+ aCP []int
+ aPcd []pcd
+}
+
+type pcd struct {
+ fc fcCompressed
+}
+
+type fcCompressed struct {
+ fc int
+ fCompressed bool
+}
+
+// read Clx (section 2.9.38)
+func getClx(table *mscfb.File, fib *fib) (*clx, error) {
+ if table == nil || fib == nil {
+ return nil, errInvalidArgument
+ }
+ b, err := readClx(table, fib)
+ if err != nil {
+ return nil, err
+ }
+
+ pcdtOffset, err := getPrcArrayEnd(b)
+ if err != nil {
+ return nil, err
+ }
+
+ pcdt, err := getPcdt(b, pcdtOffset)
+ if err != nil {
+ return nil, err
+ }
+
+ if pcdt.PlcPcd.aCP[len(pcdt.PlcPcd.aCP)-1] != fib.fibRgLw.cpLength {
+ return nil, errInvalidClx
+ }
+
+ return &clx{pcdt: *pcdt}, nil
+}
+
+func readClx(table *mscfb.File, fib *fib) ([]byte, error) {
+ b := make([]byte, fib.fibRgFcLcb.lcbClx)
+ _, err := table.ReadAt(b, int64(fib.fibRgFcLcb.fcClx))
+ if err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// read Pcdt from Clx (section 2.9.178)
+func getPcdt(clx []byte, pcdtOffset int) (*pcdt, error) {
+ const pcdSize = 8
+ if pcdtOffset < 0 || pcdtOffset+5 >= len(clx) {
+ return nil, errInvalidPcdt
+ }
+ if clx[pcdtOffset] != 0x02 { // clxt must be 0x02 or invalid
+ return nil, errInvalidPcdt
+ }
+ lcb := int(binary.LittleEndian.Uint32(clx[pcdtOffset+1 : pcdtOffset+5])) // skip clxt, get lcb
+ plcPcdOffset := pcdtOffset + 5 // skip clxt and lcb
+ numPcds := (lcb - 4) / (4 + pcdSize) // see 2.2.2 in the spec for equation
+ numCps := numPcds + 1 // always 1 more cp than pcds
+
+ cps := make([]int, numCps)
+ for i := 0; i < numCps; i++ {
+ cpOffset := plcPcdOffset + i*4
+ if cpOffset < 0 || cpOffset+4 >= len(clx) {
+ return nil, errInvalidPcdt
+ }
+ cps[i] = int(binary.LittleEndian.Uint32(clx[cpOffset : cpOffset+4]))
+ }
+
+ pcdStart := plcPcdOffset + 4*numCps
+ pcds := make([]pcd, numPcds)
+ for i := 0; i < numPcds; i++ {
+ pcdOffset := pcdStart + i*pcdSize
+ if pcdOffset < 0 || pcdOffset+pcdSize > len(clx) {
+ return nil, errInvalidPcdt
+ }
+ pcds[i] = *parsePcd(clx[pcdOffset : pcdOffset+pcdSize])
+ }
+ return &pcdt{lcb: lcb, PlcPcd: plcPcd{aCP: cps, aPcd: pcds}}, nil
+}
+
+// find end of RgPrc array (section 2.9.38)
+func getPrcArrayEnd(clx []byte) (int, error) {
+ prcOffset := 0
+ count := 0
+ for {
+ clxt := clx[prcOffset]
+ if clxt != 0x01 { // this is not a Prc, so exit
+ return prcOffset, nil
+ }
+ prcDataCbGrpprl := binary.LittleEndian.Uint16(clx[prcOffset+1 : prcOffset+3]) // skip the clxt and read 2 bytes
+ prcOffset += 1 + 2 + int(prcDataCbGrpprl) // skip clxt, cbGrpprl, and GrpPrl
+
+ if count > 10000 || prcDataCbGrpprl <= 0 || prcOffset+3 > len(clx) { // ensure no infinite loop
+ return 0, errInvalidPrc
+ }
+ count++
+ }
+}
+
+// parse Pcd (section 2.9.177)
+func parsePcd(pcdData []byte) *pcd {
+ return &pcd{fc: *parseFcCompressed(pcdData[2:6])}
+}
+
+// parse FcCompressed (section 2.9.73)
+func parseFcCompressed(fcData []byte) *fcCompressed {
+ fCompressed := fcData[3]&64 == 64 // check fCompressed value (second bit from leftmost of the last byte in fcData)
+ fcData[3] = fcData[3] & 63 // clear the fcompressed value from data
+ fc := binary.LittleEndian.Uint32(fcData) // word doc generally uses little endian order (1.3.7)
+ return &fcCompressed{fc: int(fc), fCompressed: fCompressed}
+}
+
+// IsFileDOC checks if the data indicates a DOC file.
+// DOC has multiple signatures according to https://filesignatures.net/index.php?search=doc&mode=EXT; this checks for D0 CF 11 E0 A1 B1 1A E1.
+func IsFileDOC(data []byte) bool {
+ return bytes.HasPrefix(data, []byte{0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1})
+}
diff --git a/godo/office/docx.go b/godo/office/docx.go
new file mode 100644
index 0000000..95aa5c3
--- /dev/null
+++ b/godo/office/docx.go
@@ -0,0 +1,412 @@
+package office
+
+import (
+ "archive/zip"
+ "bufio"
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "strings"
+)
+
+// Contains functions to work with data from a zip file
+type ZipData interface {
+ files() []*zip.File
+ close() error
+}
+
+// Type for in memory zip files
+type ZipInMemory struct {
+ data *zip.Reader
+}
+
+func (d ZipInMemory) files() []*zip.File {
+ return d.data.File
+}
+
+// Nothing needs to be closed for an in-memory zip, so just return nil
+func (d ZipInMemory) close() error {
+ return nil
+}
+
+// Type for zip files read from disk
+type ZipFile struct {
+ data *zip.ReadCloser
+}
+
+func (d ZipFile) files() []*zip.File {
+ return d.data.File
+}
+
+func (d ZipFile) close() error {
+ return d.data.Close()
+}
+
+type ReplaceDocx struct {
+ zipReader ZipData
+ content string
+ links string
+ headers map[string]string
+ footers map[string]string
+ images map[string]string
+}
+
+func (r *ReplaceDocx) Editable() *Docx {
+ return &Docx{
+ files: r.zipReader.files(),
+ content: r.content,
+ links: r.links,
+ headers: r.headers,
+ footers: r.footers,
+ images: r.images,
+ }
+}
+
+func (r *ReplaceDocx) Close() error {
+ return r.zipReader.close()
+}
+
+type Docx struct {
+ files []*zip.File
+ content string
+ links string
+ headers map[string]string
+ footers map[string]string
+ images map[string]string
+}
+
+func (d *Docx) GetContent() string {
+ return d.content
+}
+
+func (d *Docx) SetContent(content string) {
+ d.content = content
+}
+
+func (d *Docx) ReplaceRaw(oldString string, newString string, num int) {
+ d.content = strings.Replace(d.content, oldString, newString, num)
+}
+
+func (d *Docx) Replace(oldString string, newString string, num int) (err error) {
+ oldString, err = encode(oldString)
+ if err != nil {
+ return err
+ }
+ newString, err = encode(newString)
+ if err != nil {
+ return err
+ }
+ d.content = strings.Replace(d.content, oldString, newString, num)
+
+ return nil
+}
+
+func (d *Docx) ReplaceLink(oldString string, newString string, num int) (err error) {
+ oldString, err = encode(oldString)
+ if err != nil {
+ return err
+ }
+ newString, err = encode(newString)
+ if err != nil {
+ return err
+ }
+ d.links = strings.Replace(d.links, oldString, newString, num)
+
+ return nil
+}
+
+func (d *Docx) ReplaceHeader(oldString string, newString string) (err error) {
+ return replaceHeaderFooter(d.headers, oldString, newString)
+}
+
+func (d *Docx) ReplaceFooter(oldString string, newString string) (err error) {
+ return replaceHeaderFooter(d.footers, oldString, newString)
+}
+
+func (d *Docx) WriteToFile(path string) (err error) {
+ var target *os.File
+ target, err = os.Create(path)
+ if err != nil {
+ return
+ }
+ defer target.Close()
+ err = d.Write(target)
+ return
+}
+
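+
+// Write re-packs the docx archive to ioWriter, substituting the modified document body,
+// relationship links, headers, footers and replaced images for the originals and copying
+// every other entry through unchanged.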
+func (d *Docx) Write(ioWriter io.Writer) (err error) {
+ w := zip.NewWriter(ioWriter)
+ for _, file := range d.files {
+ var writer io.Writer
+ var readCloser io.ReadCloser
+
+ writer, err = w.Create(file.Name)
+ if err != nil {
+ return err
+ }
+ readCloser, err = file.Open()
+ if err != nil {
+ return err
+ }
+ if file.Name == "word/document.xml" {
+ writer.Write([]byte(d.content))
+ } else if file.Name == "word/_rels/document.xml.rels" {
+ writer.Write([]byte(d.links))
+ } else if strings.Contains(file.Name, "header") && d.headers[file.Name] != "" {
+ writer.Write([]byte(d.headers[file.Name]))
+ } else if strings.Contains(file.Name, "footer") && d.footers[file.Name] != "" {
+ writer.Write([]byte(d.footers[file.Name]))
+ } else if strings.HasPrefix(file.Name, "word/media/") && d.images[file.Name] != "" {
+ newImage, err := os.Open(d.images[file.Name])
+ if err != nil {
+ return err
+ }
+ writer.Write(streamToByte(newImage))
+ newImage.Close()
+ } else {
+ writer.Write(streamToByte(readCloser))
+ }
+ }
+ w.Close()
+ return
+}
+
+func replaceHeaderFooter(headerFooter map[string]string, oldString string, newString string) (err error) {
+ oldString, err = encode(oldString)
+ if err != nil {
+ return err
+ }
+ newString, err = encode(newString)
+ if err != nil {
+ return err
+ }
+
+ for k := range headerFooter {
+ headerFooter[k] = strings.Replace(headerFooter[k], oldString, newString, -1)
+ }
+
+ return nil
+}
+
+// ReadDocxFromFS opens a docx file from the file system
+func ReadDocxFromFS(file string, fs fs.FS) (*ReplaceDocx, error) {
+ f, err := fs.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ buff := bytes.NewBuffer([]byte{})
+ size, err := io.Copy(buff, f)
+ if err != nil {
+ return nil, err
+ }
+ reader := bytes.NewReader(buff.Bytes())
+ return ReadDocxFromMemory(reader, size)
+}
+
+func ReadDocxFromMemory(data io.ReaderAt, size int64) (*ReplaceDocx, error) {
+ reader, err := zip.NewReader(data, size)
+ if err != nil {
+ return nil, err
+ }
+ zipData := ZipInMemory{data: reader}
+ return ReadDocx(zipData)
+}
+
+func ReadDocxFile(path string) (*ReplaceDocx, error) {
+ reader, err := zip.OpenReader(path)
+ if err != nil {
+ return nil, err
+ }
+ zipData := ZipFile{data: reader}
+ return ReadDocx(zipData)
+}
+
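+
+// ReadDocx extracts the document body, relationship links, header/footer parts and
+// media file names from an opened docx archive.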
+func ReadDocx(reader ZipData) (*ReplaceDocx, error) {
+ content, err := readText(reader.files())
+ if err != nil {
+ return nil, err
+ }
+
+ links, err := readLinks(reader.files())
+ if err != nil {
+ return nil, err
+ }
+
+ headers, footers, _ := readHeaderFooter(reader.files())
+ images, _ := retrieveImageFilenames(reader.files())
+ return &ReplaceDocx{zipReader: reader, content: content, links: links, headers: headers, footers: footers, images: images}, nil
+}
+
+func retrieveImageFilenames(files []*zip.File) (map[string]string, error) {
+ images := make(map[string]string)
+ for _, f := range files {
+ if strings.HasPrefix(f.Name, "word/media/") {
+ images[f.Name] = ""
+ }
+ }
+ return images, nil
+}
+
+func readHeaderFooter(files []*zip.File) (headerText map[string]string, footerText map[string]string, err error) {
+
+ h, f, err := retrieveHeaderFooterDoc(files)
+
+ if err != nil {
+ return map[string]string{}, map[string]string{}, err
+ }
+
+ headerText, err = buildHeaderFooter(h)
+ if err != nil {
+ return map[string]string{}, map[string]string{}, err
+ }
+
+ footerText, err = buildHeaderFooter(f)
+ if err != nil {
+ return map[string]string{}, map[string]string{}, err
+ }
+
+ return headerText, footerText, err
+}
+
+func buildHeaderFooter(headerFooter []*zip.File) (map[string]string, error) {
+
+ headerFooterText := make(map[string]string)
+ for _, element := range headerFooter {
+ documentReader, err := element.Open()
+ if err != nil {
+ return map[string]string{}, err
+ }
+
+ text, err := wordDocToString(documentReader)
+ if err != nil {
+ return map[string]string{}, err
+ }
+
+ headerFooterText[element.Name] = text
+ }
+
+ return headerFooterText, nil
+}
+
+func readText(files []*zip.File) (text string, err error) {
+ var documentFile *zip.File
+ documentFile, err = retrieveWordDoc(files)
+ if err != nil {
+ return text, err
+ }
+ var documentReader io.ReadCloser
+ documentReader, err = documentFile.Open()
+ if err != nil {
+ return text, err
+ }
+
+ text, err = wordDocToString(documentReader)
+ return
+}
+
+func readLinks(files []*zip.File) (text string, err error) {
+ var documentFile *zip.File
+ documentFile, err = retrieveLinkDoc(files)
+ if err != nil {
+ return text, err
+ }
+ var documentReader io.ReadCloser
+ documentReader, err = documentFile.Open()
+ if err != nil {
+ return text, err
+ }
+
+ text, err = wordDocToString(documentReader)
+ return
+}
+
+func wordDocToString(reader io.Reader) (string, error) {
+ b, err := io.ReadAll(reader)
+ if err != nil {
+ return "", err
+ }
+ return string(b), nil
+}
+
+func retrieveWordDoc(files []*zip.File) (file *zip.File, err error) {
+ for _, f := range files {
+ if f.Name == "word/document.xml" {
+ file = f
+ }
+ }
+ if file == nil {
+ err = fmt.Errorf("document.xml file not found")
+ }
+ return
+}
+
+func retrieveLinkDoc(files []*zip.File) (file *zip.File, err error) {
+ for _, f := range files {
+ if f.Name == "word/_rels/document.xml.rels" {
+ file = f
+ }
+ }
+ if file == nil {
+ err = fmt.Errorf("document.xml.rels file not found")
+ }
+ return
+}
+
+func retrieveHeaderFooterDoc(files []*zip.File) (headers []*zip.File, footers []*zip.File, err error) {
+ for _, f := range files {
+
+ if strings.Contains(f.Name, "header") {
+ headers = append(headers, f)
+ }
+ if strings.Contains(f.Name, "footer") {
+ footers = append(footers, f)
+ }
+ }
+ if len(headers) == 0 && len(footers) == 0 {
+ err = fmt.Errorf("headers[1-3].xml file not found and footers[1-3].xml file not found")
+ }
+ return
+}
+
+func streamToByte(stream io.Reader) []byte {
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(stream)
+ return buf.Bytes()
+}
+
+// To get Word to recognize a tab character, we have to first close off the previous
+// text element. This means if there are multiple consecutive tabs, there are empty
+// <w:t></w:t> elements in between, but it still seems to work correctly in the output
+// document, certainly better than other combinations I tried.
+const TAB = "</w:t><w:tab/><w:t>"
+const NEWLINE = "<w:br/>"
+
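+
+// encode XML-escapes s (via encoding/xml) and then rewrites the escaped carriage-return,
+// line-feed and tab entities into the WordprocessingML markup defined by NEWLINE and TAB above.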
+func encode(s string) (string, error) {
+ var b bytes.Buffer
+ enc := xml.NewEncoder(bufio.NewWriter(&b))
+ if err := enc.Encode(s); err != nil {
+ return s, err
+ }
+ output := strings.Replace(b.String(), "<string>", "", 1) // remove string tag
+ output = strings.Replace(output, "</string>", "", 1)
+ output = strings.Replace(output, "&#xD;&#xA;", NEWLINE, -1) // \r\n (Windows newline)
+ output = strings.Replace(output, "&#xD;", NEWLINE, -1) // \r (earlier Mac newline)
+ output = strings.Replace(output, "&#xA;", NEWLINE, -1) // \n (unix/linux/OS X newline)
+ output = strings.Replace(output, "&#x9;", TAB, -1) // \t (tab)
+ return output, nil
+}
+
+func (d *Docx) ReplaceImage(oldImage string, newImage string) (err error) {
+ if _, ok := d.images[oldImage]; ok {
+ d.images[oldImage] = newImage
+ return nil
+ }
+ return fmt.Errorf("old image: %q, file not found", oldImage)
+}
+
+func (d *Docx) ImagesLen() int {
+ return len(d.images)
+}
diff --git a/godo/ai/convert/epub.go b/godo/office/epub.go
similarity index 86%
rename from godo/ai/convert/epub.go
rename to godo/office/epub.go
index ca0e36c..edcca05 100644
--- a/godo/ai/convert/epub.go
+++ b/godo/office/epub.go
@@ -1,21 +1,4 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
+package office
import (
"archive/zip"
@@ -26,19 +9,13 @@ import (
"log"
"os"
"path"
-
- "godo/ai/convert/libs"
)
const containerPath = "META-INF/container.xml"
-func ConvetEpub(r io.Reader) (string, error) {
+func epub2txt(filename string) (string, error) {
text := ""
- fpath, tmpfile, err := libs.GetTempFile(r, "prefix-epub")
- if err != nil {
- return "", err
- }
- rc, _ := OpenReader(fpath)
+ rc, _ := OpenReader(filename)
book := rc.Rootfiles[0]
// Print book title.
@@ -62,8 +39,6 @@ func ConvetEpub(r io.Reader) (string, error) {
if text == "" {
return "", nil
}
-
- defer libs.CloseTempFile(tmpfile)
return title + text, nil
}
diff --git a/godo/office/etree/LICENSE b/godo/office/etree/LICENSE
new file mode 100644
index 0000000..4ebd856
--- /dev/null
+++ b/godo/office/etree/LICENSE
@@ -0,0 +1,24 @@
+Copyright 2015-2024 Brett Vickers. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/godo/office/etree/README.md b/godo/office/etree/README.md
new file mode 100644
index 0000000..98a4518
--- /dev/null
+++ b/godo/office/etree/README.md
@@ -0,0 +1,205 @@
+[GoDoc](https://godoc.org/github.com/beevik/etree)
+[Build status](https://github.com/beevik/etree/actions/workflows/go.yml)
+
+etree
+=====
+
+The etree package is a lightweight, pure go package that expresses XML in
+the form of an element tree. Its design was inspired by the Python
+[ElementTree](http://docs.python.org/2/library/xml.etree.elementtree.html)
+module.
+
+Some of the package's capabilities and features:
+
+* Represents XML documents as trees of elements for easy traversal.
+* Imports, serializes, modifies or creates XML documents from scratch.
+* Writes and reads XML to/from files, byte slices, strings and io interfaces.
+* Performs simple or complex searches with lightweight XPath-like query APIs.
+* Auto-indents XML using spaces or tabs for better readability.
+* Implemented in pure go; depends only on standard go libraries.
+* Built on top of the go [encoding/xml](http://golang.org/pkg/encoding/xml)
+ package.
+
+### Creating an XML document
+
+The following example creates an XML document from scratch using the etree
+package and outputs its indented contents to stdout.
+```go
+doc := etree.NewDocument()
+doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`)
+doc.CreateProcInst("xml-stylesheet", `type="text/xsl" href="style.xsl"`)
+
+people := doc.CreateElement("People")
+people.CreateComment("These are all known people")
+
+jon := people.CreateElement("Person")
+jon.CreateAttr("name", "Jon")
+
+sally := people.CreateElement("Person")
+sally.CreateAttr("name", "Sally")
+
+doc.Indent(2)
+doc.WriteTo(os.Stdout)
+```
+
+Output:
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="style.xsl"?>
+<People>
+  <!--These are all known people-->
+  <Person name="Jon"/>
+  <Person name="Sally"/>
+</People>
+```
+
+### Reading an XML file
+
+Suppose you have a file on disk called `bookstore.xml` containing the
+following data:
+
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<bookstore xmlns:p="urn:schemas-books-com:prices">
+
+  <book category="COOKING">
+    <title lang="en">Everyday Italian</title>
+    <author>Giada De Laurentiis</author>
+    <year>2005</year>
+    <p:price>30.00</p:price>
+  </book>
+
+  <book category="CHILDREN">
+    <title lang="en">Harry Potter</title>
+    <author>J K. Rowling</author>
+    <year>2005</year>
+    <p:price>29.99</p:price>
+  </book>
+
+  <book category="WEB">
+    <title lang="en">XQuery Kick Start</title>
+    <author>James McGovern</author>
+    <author>Per Bothner</author>
+    <author>Kurt Cagle</author>
+    <author>James Linn</author>
+    <author>Vaidyanathan Nagarajan</author>
+    <year>2003</year>
+    <p:price>49.99</p:price>
+  </book>
+
+  <book category="WEB">
+    <title lang="en">Learning XML</title>
+    <author>Erik T. Ray</author>
+    <year>2003</year>
+    <p:price>39.95</p:price>
+  </book>
+
+</bookstore>
+```
+
+This code reads the file's contents into an etree document.
+```go
+doc := etree.NewDocument()
+if err := doc.ReadFromFile("bookstore.xml"); err != nil {
+ panic(err)
+}
+```
+
+You can also read XML from a string, a byte slice, or an `io.Reader`.
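+
+For example, a minimal sketch that parses an inline string instead of a file
+might look like this:
+
+```go
+doc := etree.NewDocument()
+if err := doc.ReadFromString(`<People><Person name="Jon"/></People>`); err != nil {
+ panic(err)
+}
+```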
+
+### Processing elements and attributes
+
+This example illustrates several ways to access elements and attributes using
+etree selection queries.
+```go
+root := doc.SelectElement("bookstore")
+fmt.Println("ROOT element:", root.Tag)
+
+for _, book := range root.SelectElements("book") {
+ fmt.Println("CHILD element:", book.Tag)
+ if title := book.SelectElement("title"); title != nil {
+ lang := title.SelectAttrValue("lang", "unknown")
+ fmt.Printf(" TITLE: %s (%s)\n", title.Text(), lang)
+ }
+ for _, attr := range book.Attr {
+ fmt.Printf(" ATTR: %s=%s\n", attr.Key, attr.Value)
+ }
+}
+```
+Output:
+```
+ROOT element: bookstore
+CHILD element: book
+ TITLE: Everyday Italian (en)
+ ATTR: category=COOKING
+CHILD element: book
+ TITLE: Harry Potter (en)
+ ATTR: category=CHILDREN
+CHILD element: book
+ TITLE: XQuery Kick Start (en)
+ ATTR: category=WEB
+CHILD element: book
+ TITLE: Learning XML (en)
+ ATTR: category=WEB
+```
+```
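+
+The same selection calls can be combined with element creation to modify the
+tree in place. As a rough sketch (the new book's details are only
+illustrative), you could append another book and write the document back out:
+
+```go
+book := root.CreateElement("book")
+book.CreateAttr("category", "COOKING")
+title := book.CreateElement("title")
+title.CreateAttr("lang", "en")
+title.SetText("The Silver Spoon")
+
+doc.Indent(2)
+if err := doc.WriteToFile("bookstore-updated.xml"); err != nil {
+ panic(err)
+}
+```
+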
+### Path queries
+
+This example uses etree's path functions to select all book titles that fall
+into the category of 'WEB'. The double-slash prefix in the path causes the
+search for book elements to occur recursively; book elements may appear at any
+level of the XML hierarchy.
+```go
+for _, t := range doc.FindElements("//book[@category='WEB']/title") {
+ fmt.Println("Title:", t.Text())
+}
+```
+
+Output:
+```
+Title: XQuery Kick Start
+Title: Learning XML
+```
+
+This example finds the first book element under the root bookstore element and
+outputs the tag and text of each of its child elements.
+```go
+for _, e := range doc.FindElements("./bookstore/book[1]/*") {
+ fmt.Printf("%s: %s\n", e.Tag, e.Text())
+}
+```
+
+Output:
+```
+title: Everyday Italian
+author: Giada De Laurentiis
+year: 2005
+price: 30.00
+```
+
+This example finds all books with a price of 49.99 and outputs their titles.
+```go
+path := etree.MustCompilePath("./bookstore/book[p:price='49.99']/title")
+for _, e := range doc.FindElementsPath(path) {
+ fmt.Println(e.Text())
+}
+```
+
+Output:
+```
+XQuery Kick Start
+```
+
+Note that this example uses the FindElementsPath function, which takes as an
+argument a pre-compiled path object. Use precompiled paths when you plan to
+search with the same path more than once.
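+
+For example, a compiled path can be stored and reused across documents (the
+`docs` slice below is only a placeholder for whatever documents you are
+searching):
+
+```go
+titlePath := etree.MustCompilePath("./bookstore/book[year='2003']/title")
+for _, d := range docs {
+ for _, e := range d.FindElementsPath(titlePath) {
+ fmt.Println(e.Text())
+ }
+}
+```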
+
+### Other features
+
+These are just a few examples of the things the etree package can do. See the
+[documentation](http://godoc.org/github.com/beevik/etree) for a complete
+description of its capabilities.
+
+### Contributing
+
+This project accepts contributions. Just fork the repo and submit a pull
+request!
diff --git a/godo/office/etree/etree.go b/godo/office/etree/etree.go
new file mode 100644
index 0000000..9de4645
--- /dev/null
+++ b/godo/office/etree/etree.go
@@ -0,0 +1,1810 @@
+// Copyright 2015-2019 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package etree provides XML services through an Element Tree
+// abstraction.
+package etree
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "io"
+ "os"
+ "slices"
+ "strings"
+)
+
+const (
+ // NoIndent is used with the IndentSettings record to remove all
+ // indenting.
+ NoIndent = -1
+)
+
+// ErrXML is returned when XML parsing fails due to incorrect formatting.
+var ErrXML = errors.New("etree: invalid XML format")
+
+// cdataPrefix is used to detect CDATA text when ReadSettings.PreserveCData is
+// true.
+var cdataPrefix = []byte("<![CDATA[")
+
+// ReadSettings determine the default behavior of the Document's ReadFrom*
+// functions.
+type ReadSettings struct {
+ // CharsetReader to be passed to standard xml.Decoder. Default: nil.
+ CharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+ // Permissive allows input containing common mistakes such as missing tags
+ // or attribute values. Default: false.
+ Permissive bool
+
+ // Preserve CDATA character data blocks when decoding XML, instead of
+ // converting them to normal character text. Default: false.
+ PreserveCData bool
+
+ // Preserve duplicate element attributes instead of keeping only the last
+ // one parsed. Default: false.
+ PreserveDuplicateAttrs bool
+
+ // ValidateInput forces the ReadFrom* functions to check that the input is
+ // well-formed XML before processing it. Default: false.
+ ValidateInput bool
+
+ // Entity to be passed to standard xml.Decoder. Default: nil.
+ Entity map[string]string
+
+ // AutoClose is a list of elements to consider closed immediately after
+ // their opening tag, to be passed to standard xml.Decoder. Default: nil.
+ AutoClose []string
+}
+
+// dup creates a duplicate of the ReadSettings object.
+func (s *ReadSettings) dup() ReadSettings {
+ ns := *s
+ if s.Entity != nil {
+ ns.Entity = make(map[string]string, len(s.Entity))
+ for k, v := range s.Entity {
+ ns.Entity[k] = v
+ }
+ }
+ return ns
+}
+
+// WriteSettings determine the behavior of the Document's WriteTo* functions.
+type WriteSettings struct {
+ // CanonicalEndTags forces the production of XML end tags, even for
+ // elements that have no child elements. Default: false.
+ CanonicalEndTags bool
+
+ // CanonicalText forces the production of XML character references for
+ // text data characters &, < and >. If false, XML character references
+ // are also produced for " and '. Default: false.
+ CanonicalText bool
+
+ // CanonicalAttrVal forces the production of XML character references for
+ // attribute value characters &, < and ". If false, XML character
+ // references are also produced for > and '. Ignored when AttrSingleQuote
+ // is true. Default: false.
+ CanonicalAttrVal bool
+
+ // AttrSingleQuote causes attributes to use single quotes (attr='example')
+ // instead of double quotes (attr = "example") when set to true. Default:
+ // false.
+ AttrSingleQuote bool
+
+ // UseCRLF causes the document's Indent* functions to use a carriage return
+ // followed by a linefeed ("\r\n") when outputting a newline. If false,
+ // only a linefeed is used ("\n"). Default: false.
+ //
+ // Deprecated: UseCRLF is deprecated. Use IndentSettings.UseCRLF instead.
+ UseCRLF bool
+}
+
+// dup creates a duplicate of the WriteSettings object.
+func (s *WriteSettings) dup() WriteSettings {
+ return *s
+}
+
+// IndentSettings determine the behavior of the Document's Indent* functions.
+type IndentSettings struct {
+ // Spaces indicates the number of spaces to insert for each level of
+ // indentation. Set to etree.NoIndent to remove all indentation. Ignored
+ // when UseTabs is true. Default: 4.
+ Spaces int
+
+ // UseTabs causes tabs to be used instead of spaces when indenting.
+ // Default: false.
+ UseTabs bool
+
+ // UseCRLF causes newlines to be written as a carriage return followed by
+ // a linefeed ("\r\n"). If false, only a linefeed character is output
+ // for a newline ("\n"). Default: false.
+ UseCRLF bool
+
+ // PreserveLeafWhitespace causes indent functions to preserve whitespace
+ // within XML elements containing only non-CDATA character data. Default:
+ // false.
+ PreserveLeafWhitespace bool
+
+ // SuppressTrailingWhitespace suppresses the generation of trailing
+ // whitespace characters (such as newlines) at the end of the indented
+ // document. Default: false.
+ SuppressTrailingWhitespace bool
+}
+
+// NewIndentSettings creates a default IndentSettings record.
+func NewIndentSettings() *IndentSettings {
+ return &IndentSettings{
+ Spaces: 4,
+ UseTabs: false,
+ UseCRLF: false,
+ PreserveLeafWhitespace: false,
+ SuppressTrailingWhitespace: false,
+ }
+}
+
+type indentFunc func(depth int) string
+
+func getIndentFunc(s *IndentSettings) indentFunc {
+ if s.UseTabs {
+ if s.UseCRLF {
+ return func(depth int) string { return indentCRLF(depth, indentTabs) }
+ } else {
+ return func(depth int) string { return indentLF(depth, indentTabs) }
+ }
+ } else {
+ if s.Spaces < 0 {
+ return func(depth int) string { return "" }
+ } else if s.UseCRLF {
+ return func(depth int) string { return indentCRLF(depth*s.Spaces, indentSpaces) }
+ } else {
+ return func(depth int) string { return indentLF(depth*s.Spaces, indentSpaces) }
+ }
+ }
+}
+
+// Writer is the interface that wraps the Write* functions called by each token
+// type's WriteTo function.
+type Writer interface {
+ io.StringWriter
+ io.ByteWriter
+ io.Writer
+}
+
+// A Token is an interface type used to represent XML elements, character
+// data, CDATA sections, XML comments, XML directives, and XML processing
+// instructions.
+type Token interface {
+ Parent() *Element
+ Index() int
+ WriteTo(w Writer, s *WriteSettings)
+ dup(parent *Element) Token
+ setParent(parent *Element)
+ setIndex(index int)
+}
+
+// A Document is a container holding a complete XML tree.
+//
+// A document has a single embedded element, which contains zero or more child
+// tokens, one of which is usually the root element. The embedded element may
+// include other children such as processing instruction tokens or character
+// data tokens. The document's embedded element is never directly serialized;
+// only its children are.
+//
+// A document also contains read and write settings, which influence the way
+// the document is deserialized, serialized, and indented.
+type Document struct {
+ Element
+ ReadSettings ReadSettings
+ WriteSettings WriteSettings
+}
+
+// An Element represents an XML element, its attributes, and its child tokens.
+type Element struct {
+ Space, Tag string // namespace prefix and tag
+ Attr []Attr // key-value attribute pairs
+ Child []Token // child tokens (elements, comments, etc.)
+ parent *Element // parent element
+ index int // token index in parent's children
+}
+
+// An Attr represents a key-value attribute within an XML element.
+type Attr struct {
+ Space, Key string // The attribute's namespace prefix and key
+ Value string // The attribute value string
+ element *Element // element containing the attribute
+}
+
+// charDataFlags are used with CharData tokens to store additional settings.
+type charDataFlags uint8
+
+const (
+ // The CharData contains only whitespace.
+ whitespaceFlag charDataFlags = 1 << iota
+
+ // The CharData contains a CDATA section.
+ cdataFlag
+)
+
+// CharData may be used to represent simple text data or a CDATA section
+// within an XML document. The Data property should never be modified
+// directly; use the SetData function instead.
+type CharData struct {
+ Data string // the simple text or CDATA section content
+ parent *Element
+ index int
+ flags charDataFlags
+}
+
+// A Comment represents an XML comment.
+type Comment struct {
+ Data string // the comment's text
+ parent *Element
+ index int
+}
+
+// A Directive represents an XML directive.
+type Directive struct {
+ Data string // the directive string
+ parent *Element
+ index int
+}
+
+// A ProcInst represents an XML processing instruction.
+type ProcInst struct {
+ Target string // the processing instruction target
+ Inst string // the processing instruction value
+ parent *Element
+ index int
+}
+
+// NewDocument creates an XML document without a root element.
+func NewDocument() *Document {
+ return &Document{
+ Element: Element{Child: make([]Token, 0)},
+ }
+}
+
+// NewDocumentWithRoot creates an XML document and sets the element 'e' as its
+// root element. If the element 'e' is already part of another document, it is
+// first removed from its existing document.
+func NewDocumentWithRoot(e *Element) *Document {
+ d := NewDocument()
+ d.SetRoot(e)
+ return d
+}
+
+// Copy returns a recursive, deep copy of the document.
+func (d *Document) Copy() *Document {
+ return &Document{
+ Element: *(d.Element.dup(nil).(*Element)),
+ ReadSettings: d.ReadSettings.dup(),
+ WriteSettings: d.WriteSettings.dup(),
+ }
+}
+
+// Root returns the root element of the document. It returns nil if there is
+// no root element.
+func (d *Document) Root() *Element {
+ for _, t := range d.Child {
+ if c, ok := t.(*Element); ok {
+ return c
+ }
+ }
+ return nil
+}
+
+// SetRoot replaces the document's root element with the element 'e'. If the
+// document already has a root element when this function is called, then the
+// existing root element is unbound from the document. If the element 'e' is
+// part of another document, then it is unbound from the other document.
+func (d *Document) SetRoot(e *Element) {
+ if e.parent != nil {
+ e.parent.RemoveChild(e)
+ }
+
+ // If there is already a root element, replace it.
+ p := &d.Element
+ for i, t := range p.Child {
+ if _, ok := t.(*Element); ok {
+ t.setParent(nil)
+ t.setIndex(-1)
+ p.Child[i] = e
+ e.setParent(p)
+ e.setIndex(i)
+ return
+ }
+ }
+
+ // No existing root element, so add it.
+ p.addChild(e)
+}
+
+// ReadFrom reads XML from the reader 'r' into this document. The function
+// returns the number of bytes read and any error encountered.
+func (d *Document) ReadFrom(r io.Reader) (n int64, err error) {
+ if d.ReadSettings.ValidateInput {
+ b, err := io.ReadAll(r)
+ if err != nil {
+ return 0, err
+ }
+ if err := validateXML(bytes.NewReader(b), d.ReadSettings); err != nil {
+ return 0, err
+ }
+ r = bytes.NewReader(b)
+ }
+ return d.Element.readFrom(r, d.ReadSettings)
+}
+
+// ReadFromFile reads XML from a local file at path 'filepath' into this
+// document.
+func (d *Document) ReadFromFile(filepath string) error {
+ f, err := os.Open(filepath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ _, err = d.ReadFrom(f)
+ return err
+}
+
+// ReadFromBytes reads XML from the byte slice 'b' into this document.
+func (d *Document) ReadFromBytes(b []byte) error {
+ if d.ReadSettings.ValidateInput {
+ if err := validateXML(bytes.NewReader(b), d.ReadSettings); err != nil {
+ return err
+ }
+ }
+ _, err := d.Element.readFrom(bytes.NewReader(b), d.ReadSettings)
+ return err
+}
+
+// ReadFromString reads XML from the string 's' into this document.
+func (d *Document) ReadFromString(s string) error {
+ if d.ReadSettings.ValidateInput {
+ if err := validateXML(strings.NewReader(s), d.ReadSettings); err != nil {
+ return err
+ }
+ }
+ _, err := d.Element.readFrom(strings.NewReader(s), d.ReadSettings)
+ return err
+}
+
+// validateXML determines if the data read from the reader 'r' contains
+// well-formed XML according to the rules set by the go xml package.
+func validateXML(r io.Reader, settings ReadSettings) error {
+ dec := newDecoder(r, settings)
+ err := dec.Decode(new(interface{}))
+ if err != nil {
+ return err
+ }
+
+ // If there are any trailing tokens after unmarshalling with Decode(),
+ // then the XML input didn't terminate properly.
+ _, err = dec.Token()
+ if err == io.EOF {
+ return nil
+ }
+ return ErrXML
+}
+
+// newDecoder creates an XML decoder for the reader 'r' configured using
+// the provided read settings.
+func newDecoder(r io.Reader, settings ReadSettings) *xml.Decoder {
+ d := xml.NewDecoder(r)
+ d.CharsetReader = settings.CharsetReader
+ if d.CharsetReader == nil {
+ d.CharsetReader = defaultCharsetReader
+ }
+ d.Strict = !settings.Permissive
+ d.Entity = settings.Entity
+ d.AutoClose = settings.AutoClose
+ return d
+}
+
+// WriteTo serializes the document out to the writer 'w'. The function returns
+// the number of bytes written and any error encountered.
+func (d *Document) WriteTo(w io.Writer) (n int64, err error) {
+ xw := newXmlWriter(w)
+ b := bufio.NewWriter(xw)
+ for _, c := range d.Child {
+ c.WriteTo(b, &d.WriteSettings)
+ }
+ err, n = b.Flush(), xw.bytes
+ return
+}
+
+// WriteToFile serializes the document out to the file at path 'filepath'.
+func (d *Document) WriteToFile(filepath string) error {
+ f, err := os.Create(filepath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ _, err = d.WriteTo(f)
+ return err
+}
+
+// WriteToBytes serializes this document into a slice of bytes.
+func (d *Document) WriteToBytes() (b []byte, err error) {
+ var buf bytes.Buffer
+ if _, err = d.WriteTo(&buf); err != nil {
+ return
+ }
+ return buf.Bytes(), nil
+}
+
+// WriteToString serializes this document into a string.
+func (d *Document) WriteToString() (s string, err error) {
+ var b []byte
+ if b, err = d.WriteToBytes(); err != nil {
+ return
+ }
+ return string(b), nil
+}
+
+// Indent modifies the document's element tree by inserting character data
+// tokens containing newlines and spaces for indentation. The amount of
+// indentation per depth level is given by the 'spaces' parameter. Other than
+// the number of spaces, default IndentSettings are used.
+func (d *Document) Indent(spaces int) {
+ s := NewIndentSettings()
+ s.Spaces = spaces
+ d.IndentWithSettings(s)
+}
+
+// IndentTabs modifies the document's element tree by inserting CharData
+// tokens containing newlines and tabs for indentation. One tab is used per
+// indentation level. Other than the use of tabs, default IndentSettings
+// are used.
+func (d *Document) IndentTabs() {
+ s := NewIndentSettings()
+ s.UseTabs = true
+ d.IndentWithSettings(s)
+}
+
+// IndentWithSettings modifies the document's element tree by inserting
+// character data tokens containing newlines and indentation. The behavior
+// of the indentation algorithm is configured by the indent settings.
+func (d *Document) IndentWithSettings(s *IndentSettings) {
+ // WriteSettings.UseCRLF is deprecated. Until removed from the package, it
+ // overrides IndentSettings.UseCRLF when true.
+ if d.WriteSettings.UseCRLF {
+ s.UseCRLF = true
+ }
+
+ d.Element.indent(0, getIndentFunc(s), s)
+
+ if s.SuppressTrailingWhitespace {
+ d.Element.stripTrailingWhitespace()
+ }
+}
+
+// Unindent modifies the document's element tree by removing character data
+// tokens containing only whitespace. Other than the removal of indentation,
+// default IndentSettings are used.
+func (d *Document) Unindent() {
+ s := NewIndentSettings()
+ s.Spaces = NoIndent
+ d.IndentWithSettings(s)
+}
+
+// NewElement creates an unparented element with the specified tag (i.e.,
+// name). The tag may include a namespace prefix followed by a colon.
+func NewElement(tag string) *Element {
+ space, stag := spaceDecompose(tag)
+ return newElement(space, stag, nil)
+}
+
+// newElement is a helper function that creates an element and binds it to
+// a parent element if possible.
+func newElement(space, tag string, parent *Element) *Element {
+ e := &Element{
+ Space: space,
+ Tag: tag,
+ Attr: make([]Attr, 0),
+ Child: make([]Token, 0),
+ parent: parent,
+ index: -1,
+ }
+ if parent != nil {
+ parent.addChild(e)
+ }
+ return e
+}
+
+// Copy creates a recursive, deep copy of the element and all its attributes
+// and children. The returned element has no parent but can be parented to
+// another element using AddChild, or added to a document with SetRoot or
+// NewDocumentWithRoot.
+func (e *Element) Copy() *Element {
+ return e.dup(nil).(*Element)
+}
+
+// FullTag returns the element e's complete tag, including namespace prefix if
+// present.
+func (e *Element) FullTag() string {
+ if e.Space == "" {
+ return e.Tag
+ }
+ return e.Space + ":" + e.Tag
+}
+
+// NamespaceURI returns the XML namespace URI associated with the element. If
+// the element is part of the XML default namespace, NamespaceURI returns the
+// empty string.
+func (e *Element) NamespaceURI() string {
+ if e.Space == "" {
+ return e.findDefaultNamespaceURI()
+ }
+ return e.findLocalNamespaceURI(e.Space)
+}
+
+// findLocalNamespaceURI finds the namespace URI corresponding to the
+// requested prefix.
+func (e *Element) findLocalNamespaceURI(prefix string) string {
+ for _, a := range e.Attr {
+ if a.Space == "xmlns" && a.Key == prefix {
+ return a.Value
+ }
+ }
+
+ if e.parent == nil {
+ return ""
+ }
+
+ return e.parent.findLocalNamespaceURI(prefix)
+}
+
+// findDefaultNamespaceURI finds the default namespace URI of the element.
+func (e *Element) findDefaultNamespaceURI() string {
+ for _, a := range e.Attr {
+ if a.Space == "" && a.Key == "xmlns" {
+ return a.Value
+ }
+ }
+
+ if e.parent == nil {
+ return ""
+ }
+
+ return e.parent.findDefaultNamespaceURI()
+}
+
+// namespacePrefix returns the namespace prefix associated with the element.
+func (e *Element) namespacePrefix() string {
+ return e.Space
+}
+
+// name returns the tag associated with the element.
+func (e *Element) name() string {
+ return e.Tag
+}
+
+// ReindexChildren recalculates the index values of the element's child
+// tokens. This is necessary only if you have manually manipulated the
+// element's `Child` array.
+func (e *Element) ReindexChildren() {
+ for i := 0; i < len(e.Child); i++ {
+ e.Child[i].setIndex(i)
+ }
+}
+
+// Text returns all character data immediately following the element's opening
+// tag.
+func (e *Element) Text() string {
+ if len(e.Child) == 0 {
+ return ""
+ }
+
+ text := ""
+ for _, ch := range e.Child {
+ if cd, ok := ch.(*CharData); ok {
+ if text == "" {
+ text = cd.Data
+ } else {
+ text += cd.Data
+ }
+ } else if _, ok := ch.(*Comment); ok {
+ // ignore
+ } else {
+ break
+ }
+ }
+ return text
+}
+
+// SetText replaces all character data immediately following an element's
+// opening tag with the requested string.
+func (e *Element) SetText(text string) {
+ e.replaceText(0, text, 0)
+}
+
+// SetCData replaces all character data immediately following an element's
+// opening tag with a CDATA section.
+func (e *Element) SetCData(text string) {
+ e.replaceText(0, text, cdataFlag)
+}
+
+// Tail returns all character data immediately following the element's end
+// tag.
+func (e *Element) Tail() string {
+ if e.Parent() == nil {
+ return ""
+ }
+
+ p := e.Parent()
+ i := e.Index()
+
+ text := ""
+ for _, ch := range p.Child[i+1:] {
+ if cd, ok := ch.(*CharData); ok {
+ if text == "" {
+ text = cd.Data
+ } else {
+ text += cd.Data
+ }
+ } else {
+ break
+ }
+ }
+ return text
+}
+
+// SetTail replaces all character data immediately following the element's end
+// tag with the requested string.
+func (e *Element) SetTail(text string) {
+ if e.Parent() == nil {
+ return
+ }
+
+ p := e.Parent()
+ p.replaceText(e.Index()+1, text, 0)
+}
+
+// replaceText is a helper function that replaces a series of chardata tokens
+// starting at index i with the requested text.
+func (e *Element) replaceText(i int, text string, flags charDataFlags) {
+ end := e.findTermCharDataIndex(i)
+
+ switch {
+ case end == i:
+ if text != "" {
+ // insert a new chardata token at index i
+ cd := newCharData(text, flags, nil)
+ e.InsertChildAt(i, cd)
+ }
+
+ case end == i+1:
+ if text == "" {
+ // remove the chardata token at index i
+ e.RemoveChildAt(i)
+ } else {
+ // replace the first and only character token at index i
+ cd := e.Child[i].(*CharData)
+ cd.Data, cd.flags = text, flags
+ }
+
+ default:
+ if text == "" {
+ // remove all chardata tokens starting from index i
+ copy(e.Child[i:], e.Child[end:])
+ removed := end - i
+ e.Child = e.Child[:len(e.Child)-removed]
+ for j := i; j < len(e.Child); j++ {
+ e.Child[j].setIndex(j)
+ }
+ } else {
+ // replace the first chardata token at index i and remove all
+ // subsequent chardata tokens
+ cd := e.Child[i].(*CharData)
+ cd.Data, cd.flags = text, flags
+ copy(e.Child[i+1:], e.Child[end:])
+ removed := end - (i + 1)
+ e.Child = e.Child[:len(e.Child)-removed]
+ for j := i + 1; j < len(e.Child); j++ {
+ e.Child[j].setIndex(j)
+ }
+ }
+ }
+}
+
+// findTermCharDataIndex finds the index of the first child token that isn't
+// a CharData token. It starts from the requested start index.
+func (e *Element) findTermCharDataIndex(start int) int {
+ for i := start; i < len(e.Child); i++ {
+ if _, ok := e.Child[i].(*CharData); !ok {
+ return i
+ }
+ }
+ return len(e.Child)
+}
+
+// CreateElement creates a new element with the specified tag (i.e., name) and
+// adds it as the last child token of this element. The tag may include a
+// prefix followed by a colon.
+func (e *Element) CreateElement(tag string) *Element {
+ space, stag := spaceDecompose(tag)
+ return newElement(space, stag, e)
+}
+
+// AddChild adds the token 't' as the last child of the element. If token 't'
+// was already the child of another element, it is first removed from its
+// parent element.
+func (e *Element) AddChild(t Token) {
+ if t.Parent() != nil {
+ t.Parent().RemoveChild(t)
+ }
+ e.addChild(t)
+}
+
+// InsertChild inserts the token 't' into this element's list of children just
+// before the element's existing child token 'ex'. If the existing element
+// 'ex' does not appear in this element's list of child tokens, then 't' is
+// added to the end of this element's list of child tokens. If token 't' is
+// already the child of another element, it is first removed from the other
+// element's list of child tokens.
+//
+// Deprecated: InsertChild is deprecated. Use InsertChildAt instead.
+func (e *Element) InsertChild(ex Token, t Token) {
+ if ex == nil || ex.Parent() != e {
+ e.AddChild(t)
+ return
+ }
+
+ if t.Parent() != nil {
+ t.Parent().RemoveChild(t)
+ }
+
+ t.setParent(e)
+
+ i := ex.Index()
+ e.Child = append(e.Child, nil)
+ copy(e.Child[i+1:], e.Child[i:])
+ e.Child[i] = t
+
+ for j := i; j < len(e.Child); j++ {
+ e.Child[j].setIndex(j)
+ }
+}
+
+// InsertChildAt inserts the token 't' into this element's list of child
+// tokens just before the requested 'index'. If the index is greater than or
+// equal to the length of the list of child tokens, then the token 't' is
+// added to the end of the list of child tokens.
+func (e *Element) InsertChildAt(index int, t Token) {
+ if index >= len(e.Child) {
+ e.AddChild(t)
+ return
+ }
+
+ if t.Parent() != nil {
+ if t.Parent() == e && t.Index() > index {
+ index--
+ }
+ t.Parent().RemoveChild(t)
+ }
+
+ t.setParent(e)
+
+ e.Child = append(e.Child, nil)
+ copy(e.Child[index+1:], e.Child[index:])
+ e.Child[index] = t
+
+ for j := index; j < len(e.Child); j++ {
+ e.Child[j].setIndex(j)
+ }
+}
+
+// RemoveChild attempts to remove the token 't' from this element's list of
+// child tokens. If the token 't' was a child of this element, then it is
+// removed and returned. Otherwise, nil is returned.
+func (e *Element) RemoveChild(t Token) Token {
+ if t.Parent() != e {
+ return nil
+ }
+ return e.RemoveChildAt(t.Index())
+}
+
+// RemoveChildAt removes the child token appearing in slot 'index' of this
+// element's list of child tokens. The removed child token is then returned.
+// If the index is out of bounds, no child is removed and nil is returned.
+func (e *Element) RemoveChildAt(index int) Token {
+ if index >= len(e.Child) {
+ return nil
+ }
+
+ t := e.Child[index]
+ for j := index + 1; j < len(e.Child); j++ {
+ e.Child[j].setIndex(j - 1)
+ }
+ e.Child = append(e.Child[:index], e.Child[index+1:]...)
+ t.setIndex(-1)
+ t.setParent(nil)
+ return t
+}
+
+// autoClose analyzes the stack's top element and the current token to decide
+// whether the top element should be closed.
+func (e *Element) autoClose(stack *stack[*Element], t xml.Token, tags []string) {
+ if stack.empty() {
+ return
+ }
+
+ top := stack.peek()
+
+ for _, tag := range tags {
+ if strings.EqualFold(tag, top.FullTag()) {
+ if e, ok := t.(xml.EndElement); !ok ||
+ !strings.EqualFold(e.Name.Space, top.Space) ||
+ !strings.EqualFold(e.Name.Local, top.Tag) {
+ stack.pop()
+ }
+ break
+ }
+ }
+}
+
+// readFrom reads XML from the reader 'ri' and stores the result as new
+// children of this element.
+func (e *Element) readFrom(ri io.Reader, settings ReadSettings) (n int64, err error) {
+ var r xmlReader
+ var pr *xmlPeekReader
+ if settings.PreserveCData {
+ pr = newXmlPeekReader(ri)
+ r = pr
+ } else {
+ r = newXmlSimpleReader(ri)
+ }
+
+ attrCheck := make(map[xml.Name]int)
+ dec := newDecoder(r, settings)
+
+ var stack stack[*Element]
+ stack.push(e)
+ for {
+ if pr != nil {
+ pr.PeekPrepare(dec.InputOffset(), len(cdataPrefix))
+ }
+
+ t, err := dec.RawToken()
+
+ if settings.Permissive && settings.AutoClose != nil {
+ e.autoClose(&stack, t, settings.AutoClose)
+ }
+
+ switch {
+ case err == io.EOF:
+ if len(stack.data) != 1 {
+ return r.Bytes(), ErrXML
+ }
+ return r.Bytes(), nil
+ case err != nil:
+ return r.Bytes(), err
+ case stack.empty():
+ return r.Bytes(), ErrXML
+ }
+
+ top := stack.peek()
+
+ switch t := t.(type) {
+ case xml.StartElement:
+ e := newElement(t.Name.Space, t.Name.Local, top)
+ if settings.PreserveDuplicateAttrs || len(t.Attr) < 2 {
+ for _, a := range t.Attr {
+ e.addAttr(a.Name.Space, a.Name.Local, a.Value)
+ }
+ } else {
+ for _, a := range t.Attr {
+ if i, contains := attrCheck[a.Name]; contains {
+ e.Attr[i].Value = a.Value
+ } else {
+ attrCheck[a.Name] = e.addAttr(a.Name.Space, a.Name.Local, a.Value)
+ }
+ }
+ clear(attrCheck)
+ }
+ stack.push(e)
+ case xml.EndElement:
+ if top.Tag != t.Name.Local || top.Space != t.Name.Space {
+ return r.Bytes(), ErrXML
+ }
+ stack.pop()
+ case xml.CharData:
+ data := string(t)
+ var flags charDataFlags
+ if pr != nil {
+ peekBuf := pr.PeekFinalize()
+ if bytes.Equal(peekBuf, cdataPrefix) {
+ flags = cdataFlag
+ } else if isWhitespace(data) {
+ flags = whitespaceFlag
+ }
+ } else {
+ if isWhitespace(data) {
+ flags = whitespaceFlag
+ }
+ }
+ newCharData(data, flags, top)
+ case xml.Comment:
+ newComment(string(t), top)
+ case xml.Directive:
+ newDirective(string(t), top)
+ case xml.ProcInst:
+ newProcInst(t.Target, string(t.Inst), top)
+ }
+ }
+}
+
+// SelectAttr finds an element attribute matching the requested 'key' and, if
+// found, returns a pointer to the matching attribute. The function returns
+// nil if no matching attribute is found. The key may include a namespace
+// prefix followed by a colon.
+func (e *Element) SelectAttr(key string) *Attr {
+ space, skey := spaceDecompose(key)
+ for i, a := range e.Attr {
+ if spaceMatch(space, a.Space) && skey == a.Key {
+ return &e.Attr[i]
+ }
+ }
+ return nil
+}
+
+// SelectAttrValue finds an element attribute matching the requested 'key' and
+// returns its value if found. If no matching attribute is found, the function
+// returns the 'dflt' value instead. The key may include a namespace prefix
+// followed by a colon.
+func (e *Element) SelectAttrValue(key, dflt string) string {
+ space, skey := spaceDecompose(key)
+ for _, a := range e.Attr {
+ if spaceMatch(space, a.Space) && skey == a.Key {
+ return a.Value
+ }
+ }
+ return dflt
+}
+
+// ChildElements returns all elements that are children of this element.
+func (e *Element) ChildElements() []*Element {
+ var elements []*Element
+ for _, t := range e.Child {
+ if c, ok := t.(*Element); ok {
+ elements = append(elements, c)
+ }
+ }
+ return elements
+}
+
+// SelectElement returns the first child element with the given 'tag' (i.e.,
+// name). The function returns nil if no child element matching the tag is
+// found. The tag may include a namespace prefix followed by a colon.
+func (e *Element) SelectElement(tag string) *Element {
+ space, stag := spaceDecompose(tag)
+ for _, t := range e.Child {
+ if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag {
+ return c
+ }
+ }
+ return nil
+}
+
+// SelectElements returns a slice of all child elements with the given 'tag'
+// (i.e., name). The tag may include a namespace prefix followed by a colon.
+func (e *Element) SelectElements(tag string) []*Element {
+ space, stag := spaceDecompose(tag)
+ var elements []*Element
+ for _, t := range e.Child {
+ if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag {
+ elements = append(elements, c)
+ }
+ }
+ return elements
+}
+
+// FindElement returns the first element matched by the XPath-like 'path'
+// string. The function returns nil if no child element is found using the
+// path. It panics if an invalid path string is supplied.
+func (e *Element) FindElement(path string) *Element {
+ return e.FindElementPath(MustCompilePath(path))
+}
+
+// FindElementPath returns the first element matched by the 'path' object. The
+// function returns nil if no element is found using the path.
+func (e *Element) FindElementPath(path Path) *Element {
+ p := newPather()
+ elements := p.traverse(e, path)
+ if len(elements) > 0 {
+ return elements[0]
+ }
+ return nil
+}
+
+// FindElements returns a slice of elements matched by the XPath-like 'path'
+// string. The function returns nil if no child element is found using the
+// path. It panics if an invalid path string is supplied.
+func (e *Element) FindElements(path string) []*Element {
+ return e.FindElementsPath(MustCompilePath(path))
+}
+
+// FindElementsPath returns a slice of elements matched by the 'path' object.
+func (e *Element) FindElementsPath(path Path) []*Element {
+ p := newPather()
+ return p.traverse(e, path)
+}
+
+// NotNil returns the receiver element if it isn't nil; otherwise, it returns
+// an unparented element with an empty string tag. This function simplifies
+// the task of writing code to ignore not-found results from element queries.
+// For example, instead of writing this:
+//
+// if e := doc.SelectElement("enabled"); e != nil {
+// e.SetText("true")
+// }
+//
+// You could write this:
+//
+// doc.SelectElement("enabled").NotNil().SetText("true")
+func (e *Element) NotNil() *Element {
+ if e == nil {
+ return NewElement("")
+ }
+ return e
+}
+
+// GetPath returns the absolute path of the element. The absolute path is the
+// full path from the document's root.
+func (e *Element) GetPath() string {
+ path := []string{}
+ for seg := e; seg != nil; seg = seg.Parent() {
+ if seg.Tag != "" {
+ path = append(path, seg.Tag)
+ }
+ }
+
+ // Reverse the path.
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
+ path[i], path[j] = path[j], path[i]
+ }
+
+ return "/" + strings.Join(path, "/")
+}
+
+// GetRelativePath returns the path of this element relative to the 'source'
+// element. If the two elements are not part of the same element tree, then
+// the function returns the empty string.
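+//
+// For example, in a hypothetical /bookstore/book/title tree, calling
+// GetRelativePath on the title element with the bookstore element as the
+// source returns "./book/title", and calling it on an author element with a
+// sibling title element as the source returns "../author".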
+func (e *Element) GetRelativePath(source *Element) string {
+ var path []*Element
+
+ if source == nil {
+ return ""
+ }
+
+ // Build a reverse path from the element toward the root. Stop if the
+ // source element is encountered.
+ var seg *Element
+ for seg = e; seg != nil && seg != source; seg = seg.Parent() {
+ path = append(path, seg)
+ }
+
+ // If we found the source element, reverse the path and compose the
+ // string.
+ if seg == source {
+ if len(path) == 0 {
+ return "."
+ }
+ parts := []string{}
+ for i := len(path) - 1; i >= 0; i-- {
+ parts = append(parts, path[i].Tag)
+ }
+ return "./" + strings.Join(parts, "/")
+ }
+
+ // The source wasn't encountered, so climb from the source element toward
+ // the root of the tree until an element in the reversed path is
+ // encountered.
+
+ findPathIndex := func(e *Element, path []*Element) int {
+ for i, ee := range path {
+ if e == ee {
+ return i
+ }
+ }
+ return -1
+ }
+
+ climb := 0
+ for seg = source; seg != nil; seg = seg.Parent() {
+ i := findPathIndex(seg, path)
+ if i >= 0 {
+ path = path[:i] // truncate at found segment
+ break
+ }
+ climb++
+ }
+
+ // No element in the reversed path was encountered, so the two elements
+ // must not be part of the same tree.
+ if seg == nil {
+ return ""
+ }
+
+ // Reverse the (possibly truncated) path and prepend ".." segments to
+ // climb.
+ parts := []string{}
+ for i := 0; i < climb; i++ {
+ parts = append(parts, "..")
+ }
+ for i := len(path) - 1; i >= 0; i-- {
+ parts = append(parts, path[i].Tag)
+ }
+ return strings.Join(parts, "/")
+}
+
+// IndentWithSettings modifies the element and its child tree by inserting
+// character data tokens containing newlines and indentation. The behavior of
+// the indentation algorithm is configured by the indent settings. Because
+// this function indents the element as if it were at the root of a document,
+// it is most useful when called just before writing the element as an XML
+// fragment using WriteTo.
+func (e *Element) IndentWithSettings(s *IndentSettings) {
+ e.indent(1, getIndentFunc(s), s)
+}
+
+// indent recursively inserts proper indentation between an XML element's
+// child tokens.
+func (e *Element) indent(depth int, indent indentFunc, s *IndentSettings) {
+ e.stripIndent(s)
+ n := len(e.Child)
+ if n == 0 {
+ return
+ }
+
+ oldChild := e.Child
+ e.Child = make([]Token, 0, n*2+1)
+ isCharData, firstNonCharData := false, true
+ for _, c := range oldChild {
+ // Insert NL+indent before child if it's not character data.
+ // Exceptions: when it's the first non-character-data child, or when
+ // the child is at root depth.
+ _, isCharData = c.(*CharData)
+ if !isCharData {
+ if !firstNonCharData || depth > 0 {
+ s := indent(depth)
+ if s != "" {
+ newCharData(s, whitespaceFlag, e)
+ }
+ }
+ firstNonCharData = false
+ }
+
+ e.addChild(c)
+
+ // Recursively process child elements.
+ if ce, ok := c.(*Element); ok {
+ ce.indent(depth+1, indent, s)
+ }
+ }
+
+ // Insert NL+indent before the last child.
+ if !isCharData {
+ if !firstNonCharData || depth > 0 {
+ s := indent(depth - 1)
+ if s != "" {
+ newCharData(s, whitespaceFlag, e)
+ }
+ }
+ }
+}
+
+// stripIndent removes any previously inserted indentation.
+func (e *Element) stripIndent(s *IndentSettings) {
+ // Count the number of non-indent child tokens
+ n := len(e.Child)
+ for _, c := range e.Child {
+ if cd, ok := c.(*CharData); ok && cd.IsWhitespace() {
+ n--
+ }
+ }
+ if n == len(e.Child) {
+ return
+ }
+ if n == 0 && len(e.Child) == 1 && s.PreserveLeafWhitespace {
+ return
+ }
+
+ // Strip out indent CharData
+ newChild := make([]Token, n)
+ j := 0
+ for _, c := range e.Child {
+ if cd, ok := c.(*CharData); ok && cd.IsWhitespace() {
+ continue
+ }
+ newChild[j] = c
+ newChild[j].setIndex(j)
+ j++
+ }
+ e.Child = newChild
+}
+
+// stripTrailingWhitespace removes any trailing whitespace CharData tokens
+// from the element's children.
+func (e *Element) stripTrailingWhitespace() {
+ for i := len(e.Child) - 1; i >= 0; i-- {
+ if cd, ok := e.Child[i].(*CharData); !ok || !cd.IsWhitespace() {
+ e.Child = e.Child[:i+1]
+ return
+ }
+ }
+}
+
+// dup duplicates the element.
+func (e *Element) dup(parent *Element) Token {
+ ne := &Element{
+ Space: e.Space,
+ Tag: e.Tag,
+ Attr: make([]Attr, len(e.Attr)),
+ Child: make([]Token, len(e.Child)),
+ parent: parent,
+ index: e.index,
+ }
+ for i, t := range e.Child {
+ ne.Child[i] = t.dup(ne)
+ }
+ copy(ne.Attr, e.Attr)
+ return ne
+}
+
+// NextSibling returns this element's next sibling element. It returns nil if
+// there is no next sibling element.
+func (e *Element) NextSibling() *Element {
+ if e.parent == nil {
+ return nil
+ }
+ for i := e.index + 1; i < len(e.parent.Child); i++ {
+ if s, ok := e.parent.Child[i].(*Element); ok {
+ return s
+ }
+ }
+ return nil
+}
+
+// PrevSibling returns this element's preceding sibling element. It returns
+// nil if there is no preceding sibling element.
+func (e *Element) PrevSibling() *Element {
+ if e.parent == nil {
+ return nil
+ }
+ for i := e.index - 1; i >= 0; i-- {
+ if s, ok := e.parent.Child[i].(*Element); ok {
+ return s
+ }
+ }
+ return nil
+}
+
+// Parent returns this element's parent element. It returns nil if this
+// element has no parent.
+func (e *Element) Parent() *Element {
+ return e.parent
+}
+
+// Index returns the index of this element within its parent element's
+// list of child tokens. If this element has no parent, then the function
+// returns -1.
+func (e *Element) Index() int {
+ return e.index
+}
+
+// WriteTo serializes the element to the writer w.
+func (e *Element) WriteTo(w Writer, s *WriteSettings) {
+ w.WriteByte('<')
+ w.WriteString(e.FullTag())
+ for _, a := range e.Attr {
+ w.WriteByte(' ')
+ a.WriteTo(w, s)
+ }
+ if len(e.Child) > 0 {
+ w.WriteByte('>')
+ for _, c := range e.Child {
+ c.WriteTo(w, s)
+ }
+ w.Write([]byte{'<', '/'})
+ w.WriteString(e.FullTag())
+ w.WriteByte('>')
+ } else {
+ if s.CanonicalEndTags {
+ w.Write([]byte{'>', '<', '/'})
+ w.WriteString(e.FullTag())
+ w.WriteByte('>')
+ } else {
+ w.Write([]byte{'/', '>'})
+ }
+ }
+}
+
+// setParent replaces this element token's parent.
+func (e *Element) setParent(parent *Element) {
+ e.parent = parent
+}
+
+// setIndex sets this element token's index within its parent's Child slice.
+func (e *Element) setIndex(index int) {
+ e.index = index
+}
+
+// addChild adds a child token to the element e.
+func (e *Element) addChild(t Token) {
+ t.setParent(e)
+ t.setIndex(len(e.Child))
+ e.Child = append(e.Child, t)
+}
+
+// CreateAttr creates an attribute with the specified 'key' and 'value' and
+// adds it to this element. If an attribute with same key already exists on
+// this element, then its value is replaced. The key may include a namespace
+// prefix followed by a colon.
+func (e *Element) CreateAttr(key, value string) *Attr {
+ space, skey := spaceDecompose(key)
+
+ for i, a := range e.Attr {
+ if space == a.Space && skey == a.Key {
+ e.Attr[i].Value = value
+ return &e.Attr[i]
+ }
+ }
+
+ i := e.addAttr(space, skey, value)
+ return &e.Attr[i]
+}
+
+// addAttr is a helper function that adds an attribute to an element. Returns
+// the index of the added attribute.
+func (e *Element) addAttr(space, key, value string) int {
+ a := Attr{
+ Space: space,
+ Key: key,
+ Value: value,
+ element: e,
+ }
+ e.Attr = append(e.Attr, a)
+ return len(e.Attr) - 1
+}
+
+// RemoveAttr removes the first attribute of this element whose key matches
+// 'key'. It returns a copy of the removed attribute if a match is found. If
+// no match is found, it returns nil. The key may include a namespace prefix
+// followed by a colon.
+func (e *Element) RemoveAttr(key string) *Attr {
+ space, skey := spaceDecompose(key)
+ for i, a := range e.Attr {
+ if space == a.Space && skey == a.Key {
+ e.Attr = append(e.Attr[0:i], e.Attr[i+1:]...)
+ return &Attr{
+ Space: a.Space,
+ Key: a.Key,
+ Value: a.Value,
+ element: nil,
+ }
+ }
+ }
+ return nil
+}
+
+// SortAttrs sorts this element's attributes lexicographically by key.
+func (e *Element) SortAttrs() {
+ slices.SortFunc(e.Attr, func(a, b Attr) int {
+ if v := strings.Compare(a.Space, b.Space); v != 0 {
+ return v
+ }
+ return strings.Compare(a.Key, b.Key)
+ })
+}
+
+// FullKey returns this attribute's complete key, including namespace prefix
+// if present.
+func (a *Attr) FullKey() string {
+ if a.Space == "" {
+ return a.Key
+ }
+ return a.Space + ":" + a.Key
+}
+
+// Element returns a pointer to the element containing this attribute.
+func (a *Attr) Element() *Element {
+ return a.element
+}
+
+// NamespaceURI returns the XML namespace URI associated with this attribute.
+// The function returns the empty string if the attribute is unprefixed or
+// if the attribute is part of the XML default namespace.
+func (a *Attr) NamespaceURI() string {
+ if a.Space == "" {
+ return ""
+ }
+ return a.element.findLocalNamespaceURI(a.Space)
+}
+
+// WriteTo serializes the attribute to the writer.
+func (a *Attr) WriteTo(w Writer, s *WriteSettings) {
+ w.WriteString(a.FullKey())
+ if s.AttrSingleQuote {
+ w.WriteString(`='`)
+ } else {
+ w.WriteString(`="`)
+ }
+ var m escapeMode
+ if s.CanonicalAttrVal && !s.AttrSingleQuote {
+ m = escapeCanonicalAttr
+ } else {
+ m = escapeNormal
+ }
+ escapeString(w, a.Value, m)
+ if s.AttrSingleQuote {
+ w.WriteByte('\'')
+ } else {
+ w.WriteByte('"')
+ }
+}
+
+// NewText creates an unparented CharData token containing simple text data.
+func NewText(text string) *CharData {
+ return newCharData(text, 0, nil)
+}
+
+// NewCData creates an unparented XML character CDATA section with 'data' as
+// its content.
+func NewCData(data string) *CharData {
+ return newCharData(data, cdataFlag, nil)
+}
+
+// NewCharData creates an unparented CharData token containing simple text
+// data.
+//
+// Deprecated: NewCharData is deprecated. Instead, use NewText, which does the
+// same thing.
+func NewCharData(data string) *CharData {
+ return newCharData(data, 0, nil)
+}
+
+// newCharData creates a character data token and binds it to a parent
+// element. If parent is nil, the CharData token remains unbound.
+func newCharData(data string, flags charDataFlags, parent *Element) *CharData {
+ c := &CharData{
+ Data: data,
+ parent: nil,
+ index: -1,
+ flags: flags,
+ }
+ if parent != nil {
+ parent.addChild(c)
+ }
+ return c
+}
+
+// CreateText creates a CharData token containing simple text data and adds it
+// to the end of this element's list of child tokens.
+func (e *Element) CreateText(text string) *CharData {
+ return newCharData(text, 0, e)
+}
+
+// CreateCData creates a CharData token containing a CDATA section with 'data'
+// as its content and adds it to the end of this element's list of child
+// tokens.
+func (e *Element) CreateCData(data string) *CharData {
+ return newCharData(data, cdataFlag, e)
+}
+
+// CreateCharData creates a CharData token containing simple text data and
+// adds it to the end of this element's list of child tokens.
+//
+// Deprecated: CreateCharData is deprecated. Instead, use CreateText, which
+// does the same thing.
+func (e *Element) CreateCharData(data string) *CharData {
+ return e.CreateText(data)
+}
+
+// SetData modifies the content of the CharData token. In the case of a
+// CharData token containing simple text, the simple text is modified. In the
+// case of a CharData token containing a CDATA section, the CDATA section's
+// content is modified.
+func (c *CharData) SetData(text string) {
+ c.Data = text
+ if isWhitespace(text) {
+ c.flags |= whitespaceFlag
+ } else {
+ c.flags &= ^whitespaceFlag
+ }
+}
+
+// IsCData returns true if this CharData token contains a CDATA section. It
+// returns false if the CharData token contains simple text.
+func (c *CharData) IsCData() bool {
+ return (c.flags & cdataFlag) != 0
+}
+
+// IsWhitespace returns true if this CharData token contains only whitespace.
+func (c *CharData) IsWhitespace() bool {
+ return (c.flags & whitespaceFlag) != 0
+}
+
+// Parent returns this CharData token's parent element, or nil if it has no
+// parent.
+func (c *CharData) Parent() *Element {
+ return c.parent
+}
+
+// Index returns the index of this CharData token within its parent element's
+// list of child tokens. If this CharData token has no parent, then the
+// function returns -1.
+func (c *CharData) Index() int {
+ return c.index
+}
+
+// WriteTo serializes character data to the writer.
+func (c *CharData) WriteTo(w Writer, s *WriteSettings) {
+ if c.IsCData() {
+ w.WriteString(`<![CDATA[`)
+ w.WriteString(c.Data)
+ w.WriteString(`]]>`)
+ } else {
+ var m escapeMode
+ if s.CanonicalText {
+ m = escapeCanonicalText
+ } else {
+ m = escapeNormal
+ }
+ escapeString(w, c.Data, m)
+ }
+}
+
+// dup duplicates the character data.
+func (c *CharData) dup(parent *Element) Token {
+ return &CharData{
+ Data: c.Data,
+ flags: c.flags,
+ parent: parent,
+ index: c.index,
+ }
+}
+
+// setParent replaces the character data token's parent.
+func (c *CharData) setParent(parent *Element) {
+ c.parent = parent
+}
+
+// setIndex sets the CharData token's index within its parent element's Child
+// slice.
+func (c *CharData) setIndex(index int) {
+ c.index = index
+}
+
+// NewComment creates an unparented comment token.
+func NewComment(comment string) *Comment {
+ return newComment(comment, nil)
+}
+
+// newComment creates a comment token and binds it to the parent element
+// 'parent'. If parent is nil, the Comment remains unbound.
+func newComment(comment string, parent *Element) *Comment {
+ c := &Comment{
+ Data: comment,
+ parent: nil,
+ index: -1,
+ }
+ if parent != nil {
+ parent.addChild(c)
+ }
+ return c
+}
+
+// CreateComment creates a comment token using the specified 'comment' string
+// and adds it as the last child token of this element.
+func (e *Element) CreateComment(comment string) *Comment {
+ return newComment(comment, e)
+}
+
+// dup duplicates the comment.
+func (c *Comment) dup(parent *Element) Token {
+ return &Comment{
+ Data: c.Data,
+ parent: parent,
+ index: c.index,
+ }
+}
+
+// Parent returns comment token's parent element, or nil if it has no parent.
+func (c *Comment) Parent() *Element {
+ return c.parent
+}
+
+// Index returns the index of this Comment token within its parent element's
+// list of child tokens. If this Comment token has no parent, then the
+// function returns -1.
+func (c *Comment) Index() int {
+ return c.index
+}
+
+// WriteTo serializes the comment to the writer.
+func (c *Comment) WriteTo(w Writer, s *WriteSettings) {
+ w.WriteString("<!--")
+ w.WriteString(c.Data)
+ w.WriteString("-->")
+}
+
+// setParent replaces the comment token's parent.
+func (c *Comment) setParent(parent *Element) {
+ c.parent = parent
+}
+
+// setIndex sets the Comment token's index within its parent element's Child
+// slice.
+func (c *Comment) setIndex(index int) {
+ c.index = index
+}
+
+// NewDirective creates an unparented XML directive token.
+func NewDirective(data string) *Directive {
+ return newDirective(data, nil)
+}
+
+// newDirective creates an XML directive and binds it to a parent element. If
+// parent is nil, the Directive remains unbound.
+func newDirective(data string, parent *Element) *Directive {
+ d := &Directive{
+ Data: data,
+ parent: nil,
+ index: -1,
+ }
+ if parent != nil {
+ parent.addChild(d)
+ }
+ return d
+}
+
+// CreateDirective creates an XML directive token with the specified 'data'
+// value and adds it as the last child token of this element.
+func (e *Element) CreateDirective(data string) *Directive {
+ return newDirective(data, e)
+}
+
+// dup duplicates the directive.
+func (d *Directive) dup(parent *Element) Token {
+ return &Directive{
+ Data: d.Data,
+ parent: parent,
+ index: d.index,
+ }
+}
+
+// Parent returns directive token's parent element, or nil if it has no
+// parent.
+func (d *Directive) Parent() *Element {
+ return d.parent
+}
+
+// Index returns the index of this Directive token within its parent element's
+// list of child tokens. If this Directive token has no parent, then the
+// function returns -1.
+func (d *Directive) Index() int {
+ return d.index
+}
+
+// WriteTo serializes the XML directive to the writer.
+func (d *Directive) WriteTo(w Writer, s *WriteSettings) {
+ w.WriteString("<!")
+ w.WriteString(d.Data)
+ w.WriteString(">")
+}
+
+// setParent replaces the directive token's parent.
+func (d *Directive) setParent(parent *Element) {
+ d.parent = parent
+}
+
+// setIndex sets the Directive token's index within its parent element's Child
+// slice.
+func (d *Directive) setIndex(index int) {
+ d.index = index
+}
+
+// NewProcInst creates an unparented XML processing instruction.
+func NewProcInst(target, inst string) *ProcInst {
+ return newProcInst(target, inst, nil)
+}
+
+// newProcInst creates an XML processing instruction and binds it to a parent
+// element. If parent is nil, the ProcInst remains unbound.
+func newProcInst(target, inst string, parent *Element) *ProcInst {
+ p := &ProcInst{
+ Target: target,
+ Inst: inst,
+ parent: nil,
+ index: -1,
+ }
+ if parent != nil {
+ parent.addChild(p)
+ }
+ return p
+}
+
+// CreateProcInst creates an XML processing instruction token with the
+// specified 'target' and instruction 'inst'. It is then added as the last
+// child token of this element.
+func (e *Element) CreateProcInst(target, inst string) *ProcInst {
+ return newProcInst(target, inst, e)
+}
+
+// dup duplicates the procinst.
+func (p *ProcInst) dup(parent *Element) Token {
+ return &ProcInst{
+ Target: p.Target,
+ Inst: p.Inst,
+ parent: parent,
+ index: p.index,
+ }
+}
+
+// Parent returns processing instruction token's parent element, or nil if it
+// has no parent.
+func (p *ProcInst) Parent() *Element {
+ return p.parent
+}
+
+// Index returns the index of this ProcInst token within its parent element's
+// list of child tokens. If this ProcInst token has no parent, then the
+// function returns -1.
+func (p *ProcInst) Index() int {
+ return p.index
+}
+
+// WriteTo serializes the processing instruction to the writer.
+func (p *ProcInst) WriteTo(w Writer, s *WriteSettings) {
+ w.WriteString("<?")
+ w.WriteString(p.Target)
+ if p.Inst != "" {
+ w.WriteByte(' ')
+ w.WriteString(p.Inst)
+ }
+ w.WriteString("?>")
+}
+
+// setParent replaces the processing instruction token's parent.
+func (p *ProcInst) setParent(parent *Element) {
+ p.parent = parent
+}
+
+// setIndex sets the processing instruction token's index within its parent
+// element's Child slice.
+func (p *ProcInst) setIndex(index int) {
+ p.index = index
+}
diff --git a/godo/office/etree/helpers.go b/godo/office/etree/helpers.go
new file mode 100644
index 0000000..ea789b6
--- /dev/null
+++ b/godo/office/etree/helpers.go
@@ -0,0 +1,394 @@
+// Copyright 2015-2019 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package etree
+
+import (
+ "io"
+ "strings"
+ "unicode/utf8"
+)
+
+type stack[E any] struct {
+ data []E
+}
+
+func (s *stack[E]) empty() bool {
+ return len(s.data) == 0
+}
+
+func (s *stack[E]) push(value E) {
+ s.data = append(s.data, value)
+}
+
+func (s *stack[E]) pop() E {
+ value := s.data[len(s.data)-1]
+ var empty E
+ s.data[len(s.data)-1] = empty
+ s.data = s.data[:len(s.data)-1]
+ return value
+}
+
+func (s *stack[E]) peek() E {
+ return s.data[len(s.data)-1]
+}
+
+type queue[E any] struct {
+ data []E
+ head, tail int
+}
+
+func (f *queue[E]) add(value E) {
+ if f.len()+1 >= len(f.data) {
+ f.grow()
+ }
+ f.data[f.tail] = value
+ if f.tail++; f.tail == len(f.data) {
+ f.tail = 0
+ }
+}
+
+func (f *queue[E]) remove() E {
+ value := f.data[f.head]
+ var empty E
+ f.data[f.head] = empty
+ if f.head++; f.head == len(f.data) {
+ f.head = 0
+ }
+ return value
+}
+
+func (f *queue[E]) len() int {
+ if f.tail >= f.head {
+ return f.tail - f.head
+ }
+ return len(f.data) - f.head + f.tail
+}
+
+func (f *queue[E]) grow() {
+ c := len(f.data) * 2
+ if c == 0 {
+ c = 4
+ }
+ buf, count := make([]E, c), f.len()
+ if f.tail >= f.head {
+ copy(buf[:count], f.data[f.head:f.tail])
+ } else {
+ hindex := len(f.data) - f.head
+ copy(buf[:hindex], f.data[f.head:])
+ copy(buf[hindex:count], f.data[:f.tail])
+ }
+ f.data, f.head, f.tail = buf, 0, count
+}
+
+// xmlReader provides the interface by which an XML byte stream is
+// processed and decoded.
+type xmlReader interface {
+ Bytes() int64
+ Read(p []byte) (n int, err error)
+}
+
+// xmlSimpleReader implements a proxy reader that counts the number of
+// bytes read from its encapsulated reader.
+type xmlSimpleReader struct {
+ r io.Reader
+ bytes int64
+}
+
+func newXmlSimpleReader(r io.Reader) xmlReader {
+ return &xmlSimpleReader{r, 0}
+}
+
+func (xr *xmlSimpleReader) Bytes() int64 {
+ return xr.bytes
+}
+
+func (xr *xmlSimpleReader) Read(p []byte) (n int, err error) {
+ n, err = xr.r.Read(p)
+ xr.bytes += int64(n)
+ return n, err
+}
+
+// xmlPeekReader implements a proxy reader that counts the number of
+// bytes read from its encapsulated reader. It also allows the caller to
+// "peek" at the previous portions of the buffer after they have been
+// parsed.
+type xmlPeekReader struct {
+ r io.Reader
+ bytes int64 // total bytes read by the Read function
+ buf []byte // internal read buffer
+ bufSize int // total bytes used in the read buffer
+ bufOffset int64 // total bytes read when buf was last filled
+ window []byte // current read buffer window
+ peekBuf []byte // buffer used to store data to be peeked at later
+ peekOffset int64 // total read offset of the start of the peek buffer
+}
+
+func newXmlPeekReader(r io.Reader) *xmlPeekReader {
+ buf := make([]byte, 4096)
+ return &xmlPeekReader{
+ r: r,
+ bytes: 0,
+ buf: buf,
+ bufSize: 0,
+ bufOffset: 0,
+ window: buf[0:0],
+ peekBuf: make([]byte, 0),
+ peekOffset: -1,
+ }
+}
+
+func (xr *xmlPeekReader) Bytes() int64 {
+ return xr.bytes
+}
+
+func (xr *xmlPeekReader) Read(p []byte) (n int, err error) {
+ if len(xr.window) == 0 {
+ err = xr.fill()
+ if err != nil {
+ return 0, err
+ }
+ if len(xr.window) == 0 {
+ return 0, nil
+ }
+ }
+
+ if len(xr.window) < len(p) {
+ n = len(xr.window)
+ } else {
+ n = len(p)
+ }
+
+ copy(p, xr.window)
+ xr.window = xr.window[n:]
+ xr.bytes += int64(n)
+
+ return n, err
+}
+
+func (xr *xmlPeekReader) PeekPrepare(offset int64, maxLen int) {
+ if maxLen > cap(xr.peekBuf) {
+ xr.peekBuf = make([]byte, 0, maxLen)
+ }
+ xr.peekBuf = xr.peekBuf[0:0]
+ xr.peekOffset = offset
+ xr.updatePeekBuf()
+}
+
+func (xr *xmlPeekReader) PeekFinalize() []byte {
+ xr.updatePeekBuf()
+ return xr.peekBuf
+}
+
+func (xr *xmlPeekReader) fill() error {
+ xr.bufOffset = xr.bytes
+ xr.bufSize = 0
+ n, err := xr.r.Read(xr.buf)
+ if err != nil {
+ xr.window, xr.bufSize = xr.buf[0:0], 0
+ return err
+ }
+ xr.window, xr.bufSize = xr.buf[:n], n
+ xr.updatePeekBuf()
+ return nil
+}
+
+func (xr *xmlPeekReader) updatePeekBuf() {
+ peekRemain := cap(xr.peekBuf) - len(xr.peekBuf)
+ if xr.peekOffset >= 0 && peekRemain > 0 {
+ rangeMin := xr.peekOffset
+ rangeMax := xr.peekOffset + int64(cap(xr.peekBuf))
+ bufMin := xr.bufOffset
+ bufMax := xr.bufOffset + int64(xr.bufSize)
+ if rangeMin < bufMin {
+ rangeMin = bufMin
+ }
+ if rangeMax > bufMax {
+ rangeMax = bufMax
+ }
+ if rangeMax > rangeMin {
+ rangeMin -= xr.bufOffset
+ rangeMax -= xr.bufOffset
+ if int(rangeMax-rangeMin) > peekRemain {
+ rangeMax = rangeMin + int64(peekRemain)
+ }
+ xr.peekBuf = append(xr.peekBuf, xr.buf[rangeMin:rangeMax]...)
+ }
+ }
+}
+
+// xmlWriter implements a proxy writer that counts the number of
+// bytes written by its encapsulated writer.
+type xmlWriter struct {
+ w io.Writer
+ bytes int64
+}
+
+func newXmlWriter(w io.Writer) *xmlWriter {
+ return &xmlWriter{w: w}
+}
+
+func (xw *xmlWriter) Write(p []byte) (n int, err error) {
+ n, err = xw.w.Write(p)
+ xw.bytes += int64(n)
+ return n, err
+}
+
+// isWhitespace returns true if the string contains only
+// whitespace characters.
+func isWhitespace(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; c != ' ' && c != '\t' && c != '\n' && c != '\r' {
+ return false
+ }
+ }
+ return true
+}
+
+// spaceMatch returns true if namespace a is the empty string
+// or if namespace a equals namespace b.
+func spaceMatch(a, b string) bool {
+ switch {
+ case a == "":
+ return true
+ default:
+ return a == b
+ }
+}
+
+// spaceDecompose breaks a namespace:tag identifier at the ':'
+// and returns the two parts.
+func spaceDecompose(str string) (space, key string) {
+ colon := strings.IndexByte(str, ':')
+ if colon == -1 {
+ return "", str
+ }
+ return str[:colon], str[colon+1:]
+}
+
+// Strings used by indentCRLF and indentLF
+const (
+ indentSpaces = "\r\n "
+ indentTabs = "\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"
+)
+
+// indentCRLF returns a CRLF newline followed by n copies of the first
+// non-CRLF character in the source string.
+func indentCRLF(n int, source string) string {
+ switch {
+ case n < 0:
+ return source[:2]
+ case n < len(source)-1:
+ return source[:n+2]
+ default:
+ return source + strings.Repeat(source[2:3], n-len(source)+2)
+ }
+}
+
+// indentLF returns a LF newline followed by n copies of the first non-LF
+// character in the source string.
+func indentLF(n int, source string) string {
+ switch {
+ case n < 0:
+ return source[1:2]
+ case n < len(source)-1:
+ return source[1 : n+2]
+ default:
+ return source[1:] + strings.Repeat(source[2:3], n-len(source)+2)
+ }
+}
+
+// nextIndex returns the index of the next occurrence of byte ch in s,
+// starting from offset. It returns -1 if the byte is not found.
+func nextIndex(s string, ch byte, offset int) int {
+ switch i := strings.IndexByte(s[offset:], ch); i {
+ case -1:
+ return -1
+ default:
+ return offset + i
+ }
+}
+
+// isInteger returns true if the string s contains an integer.
+func isInteger(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if (s[i] < '0' || s[i] > '9') && !(i == 0 && s[i] == '-') {
+ return false
+ }
+ }
+ return true
+}
+
+type escapeMode byte
+
+const (
+ escapeNormal escapeMode = iota
+ escapeCanonicalText
+ escapeCanonicalAttr
+)
+
+// escapeString writes an escaped version of a string to the writer.
+func escapeString(w Writer, s string, m escapeMode) {
+ var esc []byte
+ last := 0
+ for i := 0; i < len(s); {
+ r, width := utf8.DecodeRuneInString(s[i:])
+ i += width
+ switch r {
+ case '&':
+ esc = []byte("&")
+ case '<':
+ esc = []byte("<")
+ case '>':
+ if m == escapeCanonicalAttr {
+ continue
+ }
+ esc = []byte(">")
+ case '\'':
+ if m != escapeNormal {
+ continue
+ }
+ esc = []byte("'")
+ case '"':
+ if m == escapeCanonicalText {
+ continue
+ }
+ esc = []byte(""")
+ case '\t':
+ if m != escapeCanonicalAttr {
+ continue
+ }
+ esc = []byte(" ")
+ case '\n':
+ if m != escapeCanonicalAttr {
+ continue
+ }
+ esc = []byte("
")
+ case '\r':
+ if m == escapeNormal {
+ continue
+ }
+ esc = []byte("
")
+ default:
+ if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+ esc = []byte("\uFFFD")
+ break
+ }
+ continue
+ }
+ w.WriteString(s[last : i-width])
+ w.Write(esc)
+ last = i
+ }
+ w.WriteString(s[last:])
+}
+
+func isInCharacterRange(r rune) bool {
+ return r == 0x09 ||
+ r == 0x0A ||
+ r == 0x0D ||
+ r >= 0x20 && r <= 0xD7FF ||
+ r >= 0xE000 && r <= 0xFFFD ||
+ r >= 0x10000 && r <= 0x10FFFF
+}
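As a quick illustration of the escaping table above, the sketch below runs escapeString in its default mode. It is not part of the change set; it assumes it lives inside package etree (the function is unexported) and that *bytes.Buffer satisfies this package's Writer interface through its Write, WriteByte and WriteString methods.

    package etree

    import (
        "bytes"
        "fmt"
    )

    func exampleEscape() {
        var buf bytes.Buffer
        escapeString(&buf, `<a b="c & d">`, escapeNormal)
        fmt.Println(buf.String()) // &lt;a b=&quot;c &amp; d&quot;&gt;
    }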
diff --git a/godo/office/etree/path.go b/godo/office/etree/path.go
new file mode 100644
index 0000000..8d63096
--- /dev/null
+++ b/godo/office/etree/path.go
@@ -0,0 +1,595 @@
+// Copyright 2015-2019 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package etree
+
+import (
+ "strconv"
+ "strings"
+)
+
+/*
+A Path is a string that represents a search path through an etree starting
+from the document root or an arbitrary element. Paths are used with the
+Element object's Find* methods to locate and return desired elements.
+
+A Path consists of a series of slash-separated "selectors", each of which may
+be modified by one or more bracket-enclosed "filters". Selectors are used to
+traverse the etree from element to element, while filters are used to narrow
+the list of candidate elements at each node.
+
+Although etree Path strings are structurally and behaviorally similar to XPath
+strings (https://www.w3.org/TR/1999/REC-xpath-19991116/), they have a more
+limited set of selectors and filtering options.
+
+The following selectors are supported by etree paths:
+
+ . Select the current element.
+ .. Select the parent of the current element.
+ * Select all child elements of the current element.
+ / Select the root element when used at the start of a path.
+ // Select all descendants of the current element.
+ tag Select all child elements with a name matching the tag.
+
+The following basic filters are supported:
+
+ [@attrib] Keep elements with an attribute named attrib.
+ [@attrib='val'] Keep elements with an attribute named attrib and value matching val.
+ [tag] Keep elements with a child element named tag.
+ [tag='val'] Keep elements with a child element named tag and text matching val.
+ [n] Keep the n-th element, where n is a numeric index starting from 1.
+
+The following function-based filters are supported:
+
+ [text()] Keep elements with non-empty text.
+ [text()='val'] Keep elements whose text matches val.
+ [local-name()='val'] Keep elements whose un-prefixed tag matches val.
+ [name()='val'] Keep elements whose full tag exactly matches val.
+ [namespace-prefix()] Keep elements with non-empty namespace prefixes.
+ [namespace-prefix()='val'] Keep elements whose namespace prefix matches val.
+ [namespace-uri()] Keep elements with non-empty namespace URIs.
+ [namespace-uri()='val'] Keep elements whose namespace URI matches val.
+
+Below are some examples of etree path strings.
+
+Select the bookstore child element of the root element:
+
+ /bookstore
+
+Beginning from the root element, select the title elements of all descendant
+book elements having a 'category' attribute of 'WEB':
+
+ //book[@category='WEB']/title
+
+Beginning from the current element, select the first descendant book element
+with a title child element containing the text 'Great Expectations':
+
+ .//book[title='Great Expectations'][1]
+
+Beginning from the current element, select all child elements of book elements
+with an attribute 'language' set to 'english':
+
+ ./book/*[@language='english']
+
+Beginning from the current element, select all child elements of book elements
+containing the text 'special':
+
+ ./book/*[text()='special']
+
+Beginning from the current element, select all descendant book elements whose
+title child element has a 'language' attribute of 'french':
+
+ .//book/title[@language='french']/..
+
+Beginning from the current element, select all descendant book elements
+belonging to the http://www.w3.org/TR/html4/ namespace:
+
+ .//book[namespace-uri()='http://www.w3.org/TR/html4/']
+*/
+type Path struct {
+ segments []segment
+}
+
+// ErrPath is returned by path functions when an invalid etree path is provided.
+type ErrPath string
+
+// Error returns the string describing a path error.
+func (err ErrPath) Error() string {
+ return "etree: " + string(err)
+}
+
+// CompilePath creates an optimized version of an XPath-like string that
+// can be used to query elements in an element tree.
+func CompilePath(path string) (Path, error) {
+ var comp compiler
+ segments := comp.parsePath(path)
+ if comp.err != ErrPath("") {
+ return Path{nil}, comp.err
+ }
+ return Path{segments}, nil
+}
+
+// MustCompilePath creates an optimized version of an XPath-like string that
+// can be used to query elements in an element tree. Panics if an error
+// occurs. Use this function to create Paths when you know the path is
+// valid (i.e., if it's hard-coded).
+func MustCompilePath(path string) Path {
+ p, err := CompilePath(path)
+ if err != nil {
+ panic(err)
+ }
+ return p
+}
+
+// A segment is a portion of a path between "/" characters.
+// It contains one selector and zero or more [filters].
+type segment struct {
+ sel selector
+ filters []filter
+}
+
+func (seg *segment) apply(e *Element, p *pather) {
+ seg.sel.apply(e, p)
+ for _, f := range seg.filters {
+ f.apply(p)
+ }
+}
+
+// A selector selects XML elements for consideration by the
+// path traversal.
+type selector interface {
+ apply(e *Element, p *pather)
+}
+
+// A filter pares down a list of candidate XML elements based
+// on a path filter in [brackets].
+type filter interface {
+ apply(p *pather)
+}
+
+// A pather is a helper object that traverses an element tree using
+// a Path object. It collects and deduplicates all elements matching
+// the path query.
+type pather struct {
+ queue queue[node]
+ results []*Element
+ inResults map[*Element]bool
+ candidates []*Element
+ scratch []*Element // used by filters
+}
+
+// A node represents an element and the remaining path segments that
+// should be applied against it by the pather.
+type node struct {
+ e *Element
+ segments []segment
+}
+
+func newPather() *pather {
+ return &pather{
+ results: make([]*Element, 0),
+ inResults: make(map[*Element]bool),
+ candidates: make([]*Element, 0),
+ scratch: make([]*Element, 0),
+ }
+}
+
+// traverse follows the path from the element e, collecting
+// and then returning all elements that match the path's selectors
+// and filters.
+func (p *pather) traverse(e *Element, path Path) []*Element {
+ for p.queue.add(node{e, path.segments}); p.queue.len() > 0; {
+ p.eval(p.queue.remove())
+ }
+ return p.results
+}
+
+// eval evaluates the current path node by applying the remaining
+// path's selector rules against the node's element.
+func (p *pather) eval(n node) {
+ p.candidates = p.candidates[0:0]
+ seg, remain := n.segments[0], n.segments[1:]
+ seg.apply(n.e, p)
+
+ if len(remain) == 0 {
+ for _, c := range p.candidates {
+ if in := p.inResults[c]; !in {
+ p.inResults[c] = true
+ p.results = append(p.results, c)
+ }
+ }
+ } else {
+ for _, c := range p.candidates {
+ p.queue.add(node{c, remain})
+ }
+ }
+}
+
+// A compiler generates a compiled path from a path string.
+type compiler struct {
+ err ErrPath
+}
+
+// parsePath parses an XPath-like string describing a path
+// through an element tree and returns a slice of segment
+// descriptors.
+func (c *compiler) parsePath(path string) []segment {
+ // If path ends with //, fix it
+ if strings.HasSuffix(path, "//") {
+ path += "*"
+ }
+
+ var segments []segment
+
+ // Check for an absolute path
+ if strings.HasPrefix(path, "/") {
+ segments = append(segments, segment{new(selectRoot), []filter{}})
+ path = path[1:]
+ }
+
+ // Split path into segments
+ for _, s := range splitPath(path) {
+ segments = append(segments, c.parseSegment(s))
+ if c.err != ErrPath("") {
+ break
+ }
+ }
+ return segments
+}
+
+func splitPath(path string) []string {
+ var pieces []string
+ start := 0
+ inquote := false
+ var quote byte
+ for i := 0; i+1 <= len(path); i++ {
+ if !inquote {
+ if path[i] == '\'' || path[i] == '"' {
+ inquote, quote = true, path[i]
+ } else if path[i] == '/' {
+ pieces = append(pieces, path[start:i])
+ start = i + 1
+ }
+ } else if path[i] == quote {
+ inquote = false
+ }
+ }
+ return append(pieces, path[start:])
+}
+
+// parseSegment parses a path segment between / characters.
+func (c *compiler) parseSegment(path string) segment {
+ pieces := strings.Split(path, "[")
+ seg := segment{
+ sel: c.parseSelector(pieces[0]),
+ filters: []filter{},
+ }
+ for i := 1; i < len(pieces); i++ {
+ fpath := pieces[i]
+ if len(fpath) == 0 || fpath[len(fpath)-1] != ']' {
+ c.err = ErrPath("path has invalid filter [brackets].")
+ break
+ }
+ seg.filters = append(seg.filters, c.parseFilter(fpath[:len(fpath)-1]))
+ }
+ return seg
+}
+
+// parseSelector parses a selector at the start of a path segment.
+func (c *compiler) parseSelector(path string) selector {
+ switch path {
+ case ".":
+ return new(selectSelf)
+ case "..":
+ return new(selectParent)
+ case "*":
+ return new(selectChildren)
+ case "":
+ return new(selectDescendants)
+ default:
+ return newSelectChildrenByTag(path)
+ }
+}
+
+var fnTable = map[string]func(e *Element) string{
+ "local-name": (*Element).name,
+ "name": (*Element).FullTag,
+ "namespace-prefix": (*Element).namespacePrefix,
+ "namespace-uri": (*Element).NamespaceURI,
+ "text": (*Element).Text,
+}
+
+// parseFilter parses a path filter contained within [brackets].
+func (c *compiler) parseFilter(path string) filter {
+ if len(path) == 0 {
+ c.err = ErrPath("path contains an empty filter expression.")
+ return nil
+ }
+
+ // Filter contains [@attr='val'], [@attr="val"], [fn()='val'],
+ // [fn()="val"], [tag='val'] or [tag="val"]?
+ eqindex := strings.IndexByte(path, '=')
+ if eqindex >= 0 && eqindex+1 < len(path) {
+ quote := path[eqindex+1]
+ if quote == '\'' || quote == '"' {
+ rindex := nextIndex(path, quote, eqindex+2)
+ if rindex != len(path)-1 {
+ c.err = ErrPath("path has mismatched filter quotes.")
+ return nil
+ }
+
+ key := path[:eqindex]
+ value := path[eqindex+2 : rindex]
+
+ switch {
+ case key[0] == '@':
+ return newFilterAttrVal(key[1:], value)
+ case strings.HasSuffix(key, "()"):
+ name := key[:len(key)-2]
+ if fn, ok := fnTable[name]; ok {
+ return newFilterFuncVal(fn, value)
+ }
+ c.err = ErrPath("path has unknown function " + name)
+ return nil
+ default:
+ return newFilterChildText(key, value)
+ }
+ }
+ }
+
+ // Filter contains [@attr], [N], [tag] or [fn()]
+ switch {
+ case path[0] == '@':
+ return newFilterAttr(path[1:])
+ case strings.HasSuffix(path, "()"):
+ name := path[:len(path)-2]
+ if fn, ok := fnTable[name]; ok {
+ return newFilterFunc(fn)
+ }
+ c.err = ErrPath("path has unknown function " + name)
+ return nil
+ case isInteger(path):
+ pos, _ := strconv.Atoi(path)
+ switch {
+ case pos > 0:
+ return newFilterPos(pos - 1)
+ default:
+ return newFilterPos(pos)
+ }
+ default:
+ return newFilterChild(path)
+ }
+}
+
+// selectSelf selects the current element into the candidate list.
+type selectSelf struct{}
+
+func (s *selectSelf) apply(e *Element, p *pather) {
+ p.candidates = append(p.candidates, e)
+}
+
+// selectRoot selects the element's root node.
+type selectRoot struct{}
+
+func (s *selectRoot) apply(e *Element, p *pather) {
+ root := e
+ for root.parent != nil {
+ root = root.parent
+ }
+ p.candidates = append(p.candidates, root)
+}
+
+// selectParent selects the element's parent into the candidate list.
+type selectParent struct{}
+
+func (s *selectParent) apply(e *Element, p *pather) {
+ if e.parent != nil {
+ p.candidates = append(p.candidates, e.parent)
+ }
+}
+
+// selectChildren selects the element's child elements into the
+// candidate list.
+type selectChildren struct{}
+
+func (s *selectChildren) apply(e *Element, p *pather) {
+ for _, c := range e.Child {
+ if c, ok := c.(*Element); ok {
+ p.candidates = append(p.candidates, c)
+ }
+ }
+}
+
+// selectDescendants selects all descendant child elements
+// of the element into the candidate list.
+type selectDescendants struct{}
+
+func (s *selectDescendants) apply(e *Element, p *pather) {
+ var queue queue[*Element]
+ for queue.add(e); queue.len() > 0; {
+ e := queue.remove()
+ p.candidates = append(p.candidates, e)
+ for _, c := range e.Child {
+ if c, ok := c.(*Element); ok {
+ queue.add(c)
+ }
+ }
+ }
+}
+
+// selectChildrenByTag selects into the candidate list all child
+// elements of the element having the specified tag.
+type selectChildrenByTag struct {
+ space, tag string
+}
+
+func newSelectChildrenByTag(path string) *selectChildrenByTag {
+ s, l := spaceDecompose(path)
+ return &selectChildrenByTag{s, l}
+}
+
+func (s *selectChildrenByTag) apply(e *Element, p *pather) {
+ for _, c := range e.Child {
+ if c, ok := c.(*Element); ok && spaceMatch(s.space, c.Space) && s.tag == c.Tag {
+ p.candidates = append(p.candidates, c)
+ }
+ }
+}
+
+// filterPos filters the candidate list, keeping only the
+// candidate at the specified index.
+type filterPos struct {
+ index int
+}
+
+func newFilterPos(pos int) *filterPos {
+ return &filterPos{pos}
+}
+
+func (f *filterPos) apply(p *pather) {
+ if f.index >= 0 {
+ if f.index < len(p.candidates) {
+ p.scratch = append(p.scratch, p.candidates[f.index])
+ }
+ } else {
+ if -f.index <= len(p.candidates) {
+ p.scratch = append(p.scratch, p.candidates[len(p.candidates)+f.index])
+ }
+ }
+ p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
+
+// filterAttr filters the candidate list for elements having
+// the specified attribute.
+type filterAttr struct {
+ space, key string
+}
+
+func newFilterAttr(str string) *filterAttr {
+ s, l := spaceDecompose(str)
+ return &filterAttr{s, l}
+}
+
+func (f *filterAttr) apply(p *pather) {
+ for _, c := range p.candidates {
+ for _, a := range c.Attr {
+ if spaceMatch(f.space, a.Space) && f.key == a.Key {
+ p.scratch = append(p.scratch, c)
+ break
+ }
+ }
+ }
+ p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
+
+// filterAttrVal filters the candidate list for elements having
+// the specified attribute with the specified value.
+type filterAttrVal struct {
+ space, key, val string
+}
+
+func newFilterAttrVal(str, value string) *filterAttrVal {
+ s, l := spaceDecompose(str)
+ return &filterAttrVal{s, l, value}
+}
+
+func (f *filterAttrVal) apply(p *pather) {
+ for _, c := range p.candidates {
+ for _, a := range c.Attr {
+ if spaceMatch(f.space, a.Space) && f.key == a.Key && f.val == a.Value {
+ p.scratch = append(p.scratch, c)
+ break
+ }
+ }
+ }
+ p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
+
+// filterFunc filters the candidate list for elements satisfying a custom
+// boolean function.
+type filterFunc struct {
+ fn func(e *Element) string
+}
+
+func newFilterFunc(fn func(e *Element) string) *filterFunc {
+ return &filterFunc{fn}
+}
+
+func (f *filterFunc) apply(p *pather) {
+ for _, c := range p.candidates {
+ if f.fn(c) != "" {
+ p.scratch = append(p.scratch, c)
+ }
+ }
+ p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
+
+// filterFuncVal filters the candidate list for elements containing a value
+// matching the result of a custom function.
+type filterFuncVal struct {
+ fn func(e *Element) string
+ val string
+}
+
+func newFilterFuncVal(fn func(e *Element) string, value string) *filterFuncVal {
+ return &filterFuncVal{fn, value}
+}
+
+func (f *filterFuncVal) apply(p *pather) {
+ for _, c := range p.candidates {
+ if f.fn(c) == f.val {
+ p.scratch = append(p.scratch, c)
+ }
+ }
+ p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
+
+// filterChild filters the candidate list for elements having
+// a child element with the specified tag.
+type filterChild struct {
+ space, tag string
+}
+
+func newFilterChild(str string) *filterChild {
+ s, l := spaceDecompose(str)
+ return &filterChild{s, l}
+}
+
+func (f *filterChild) apply(p *pather) {
+ for _, c := range p.candidates {
+ for _, cc := range c.Child {
+ if cc, ok := cc.(*Element); ok &&
+ spaceMatch(f.space, cc.Space) &&
+ f.tag == cc.Tag {
+ p.scratch = append(p.scratch, c)
+ }
+ }
+ }
+ p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
+
+// filterChildText filters the candidate list for elements having
+// a child element with the specified tag and text.
+type filterChildText struct {
+ space, tag, text string
+}
+
+func newFilterChildText(str, text string) *filterChildText {
+ s, l := spaceDecompose(str)
+ return &filterChildText{s, l, text}
+}
+
+func (f *filterChildText) apply(p *pather) {
+ for _, c := range p.candidates {
+ for _, cc := range c.Child {
+ if cc, ok := cc.(*Element); ok &&
+ spaceMatch(f.space, cc.Space) &&
+ f.tag == cc.Tag &&
+ f.text == cc.Text() {
+ p.scratch = append(p.scratch, c)
+ }
+ }
+ }
+ p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
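To make the selector and filter syntax documented above concrete, here is a minimal usage sketch. It is illustrative only and assumes the vendored package keeps upstream etree's Document.ReadFromString and Element.FindElementsPath methods (they are defined in files outside this hunk); the sample XML is invented.

    package main

    import (
        "fmt"

        "godo/office/etree"
    )

    func main() {
        doc := etree.NewDocument()
        xml := `<bookstore><book category="WEB"><title>Learning XML</title></book></bookstore>`
        if err := doc.ReadFromString(xml); err != nil {
            panic(err)
        }
        // MustCompilePath is safe here because the path is hard-coded.
        path := etree.MustCompilePath("//book[@category='WEB']/title")
        for _, e := range doc.FindElementsPath(path) {
            fmt.Println(e.Text()) // Learning XML
        }
    }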
diff --git a/godo/ai/convert/tidy.go b/godo/office/etree/tidy.go
similarity index 89%
rename from godo/ai/convert/tidy.go
rename to godo/office/etree/tidy.go
index 5d22962..2ba1b0f 100644
--- a/godo/ai/convert/tidy.go
+++ b/godo/office/etree/tidy.go
@@ -15,20 +15,18 @@
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see .
*/
-package convert
+package etree
import (
"bytes"
"fmt"
"io"
-
- "github.com/beevik/etree"
)
// TidyWithEtree uses the beevik/etree library to perform a simple XML cleanup
func Tidy(r io.Reader) ([]byte, error) {
// Read and parse the XML
- doc := etree.NewDocument()
+ doc := NewDocument()
if _, err := doc.ReadFrom(r); err != nil {
return nil, fmt.Errorf("error reading and parsing XML: %w", err)
}
@@ -46,10 +44,10 @@ func Tidy(r io.Reader) ([]byte, error) {
}
// removeEmptyNodes walks the XML tree and removes empty nodes
-func removeEmptyNodes(node *etree.Element) {
+func removeEmptyNodes(node *Element) {
for i := len(node.Child) - 1; i >= 0; i-- { // iterate in reverse so deletions are safe
token := node.Child[i]
- element, ok := token.(*etree.Element) // check whether it is an etree.Element
+ element, ok := token.(*Element) // check whether it is an Element
if ok {
text := element.Text() // get the element's text
if text == "" && len(element.Attr) == 0 && len(element.Child) == 0 {
diff --git a/godo/ai/convert/html.go b/godo/office/html.go
similarity index 62%
rename from godo/ai/convert/html.go
rename to godo/office/html.go
index 7de599f..2e8dcc6 100644
--- a/godo/ai/convert/html.go
+++ b/godo/office/html.go
@@ -1,31 +1,25 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
+package office
import (
"bytes"
"io"
+ "os"
"regexp"
"strings"
"golang.org/x/net/html"
)
+func html2txt(filename string) (string, error) {
+ file, err := os.Open(filename)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+ return ConvertHTML(file)
+}
+
// Strip HTML tags from the string
func TrimHtml(text string) string {
// Strip HTML tags from the string
diff --git a/godo/office/json.go b/godo/office/json.go
new file mode 100644
index 0000000..9d8cd85
--- /dev/null
+++ b/godo/office/json.go
@@ -0,0 +1,64 @@
+package office
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "strings"
+)
+
+// extractTextFromJSON recursively extracts plain text from JSON data
+func extractTextFromJSON(data interface{}) []string {
+ var texts []string
+
+ switch v := data.(type) {
+ case map[string]interface{}:
+ for _, value := range v {
+ texts = append(texts, extractTextFromJSON(value)...)
+ }
+ case []interface{}:
+ for _, item := range v {
+ texts = append(texts, extractTextFromJSON(item)...)
+ }
+ case string:
+ texts = append(texts, v)
+ default:
+ // ignore other types
+ }
+
+ return texts
+}
+
+func json2txt(filename string) (string, error) {
+ file, err := os.Open(filename)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ byteValue, err := ioutil.ReadAll(file)
+ if err != nil {
+ return "", err
+ }
+
+ var jsonData interface{}
+ err = json.Unmarshal(byteValue, &jsonData)
+ if err != nil {
+ return "", err
+ }
+
+ plainText := extractTextFromJSON(jsonData)
+
+ // Join all strings in the slice into a single string
+ plainTextStr := strings.Join(plainText, " ")
+
+ // Collapse redundant whitespace
+ re := regexp.MustCompile(`\s+`)
+ plainTextStr = re.ReplaceAllString(plainTextStr, " ")
+
+ // Trim leading and trailing whitespace
+ plainTextStr = strings.TrimSpace(plainTextStr)
+
+ return plainTextStr, nil
+}
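For reference, a sketch of what the recursive extractor yields for a small nested document; extractTextFromJSON is unexported, so this would live inside package office, and the sample JSON is invented.

    func exampleExtract() {
        var v interface{}
        raw := []byte(`{"title":"report","items":[{"note":"draft"},"final"],"count":3}`)
        if err := json.Unmarshal(raw, &v); err != nil {
            panic(err)
        }
        // Only string leaves are kept; numbers and booleans fall into the
        // default branch and are ignored. Map iteration order is not stable.
        fmt.Println(extractTextFromJSON(v))
    }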
diff --git a/godo/office/linux.go b/godo/office/linux.go
new file mode 100644
index 0000000..f5f821d
--- /dev/null
+++ b/godo/office/linux.go
@@ -0,0 +1,27 @@
+//go:build linux
+// +build linux
+
+package office
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func getFileInfoData(data *Document) (bool, error) {
+ fileinfo, err := os.Stat(data.path)
+ if err != nil {
+ return false, err
+ }
+ data.Filename = fileinfo.Name()
+ data.Title = data.Filename
+ data.Size = int(fileinfo.Size())
+
+ stat := fileinfo.Sys().(*syscall.Stat_t)
+ data.Createtime = time.Unix(stat.Ctim.Sec, stat.Ctim.Nsec)
+ data.Modifytime = time.Unix(stat.Mtim.Sec, stat.Mtim.Nsec)
+ data.Accesstime = time.Unix(stat.Atim.Sec, stat.Atim.Nsec)
+
+ return true, nil
+}
diff --git a/godo/office/md.go b/godo/office/md.go
new file mode 100644
index 0000000..c5831d5
--- /dev/null
+++ b/godo/office/md.go
@@ -0,0 +1,48 @@
+package office
+
+import (
+ "bufio"
+ "os"
+ "regexp"
+ "strings"
+)
+
+var (
+ reHTML = regexp.MustCompile(`<[^>]*>`)
+ reMarkdown = regexp.MustCompile(`[\*_|#]{1,4}`)
+ reWhitespace = regexp.MustCompile(`\s+`)
+)
+
+func md2txt(filename string) (string, error) {
+ file, err := os.Open(filename)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ var lines []string
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ line := scanner.Text()
+ // Strip HTML tags
+ line = reHTML.ReplaceAllString(line, "")
+ // Strip Markdown formatting symbols
+ line = reMarkdown.ReplaceAllString(line, "")
+ lines = append(lines, line)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+
+ // Join all lines
+ content := strings.Join(lines, " ")
+
+ // Collapse redundant whitespace
+ content = reWhitespace.ReplaceAllString(content, " ")
+
+ // Trim leading and trailing whitespace
+ content = strings.TrimSpace(content)
+
+ return content, nil
+}
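Roughly what the two regex passes in md2txt do to a single line; this is a package-internal sketch, the sample string is made up, and the final spacing depends on the later whitespace collapse.

    line := `## **Bold** heading with a <em>tag</em>`
    line = reHTML.ReplaceAllString(line, "")     // drops <em> and </em>
    line = reMarkdown.ReplaceAllString(line, "") // drops runs of '#', '*', '_' and '|'
    // After reWhitespace and TrimSpace: "Bold heading with a tag"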
diff --git a/godo/ai/convert/odt.go b/godo/office/odt.go
similarity index 58%
rename from godo/ai/convert/odt.go
rename to godo/office/odt.go
index 6d59332..1d18fd7 100644
--- a/godo/ai/convert/odt.go
+++ b/godo/office/odt.go
@@ -1,36 +1,24 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
+package office
import (
"archive/zip"
"bytes"
"fmt"
"io"
+ "os"
"time"
)
// ConvertODT converts a ODT file to text
-func ConvertODT(r io.Reader) (string, error) {
+func odt2txt(filePath string) (string, error) {
meta := make(map[string]string)
var textBody string
-
- b, err := io.ReadAll(io.LimitReader(r, maxBytes))
+ file, err := os.Open(filePath)
+ if err != nil {
+ return "", fmt.Errorf("error opening file: %v", err)
+ }
+ defer file.Close()
+ b, err := io.ReadAll(io.LimitReader(file, maxBytes))
if err != nil {
return "", err
}
@@ -80,12 +68,6 @@ func ConvertODT(r io.Reader) (string, error) {
}
}
}
- // After the ZIP file is parsed successfully, add image-extraction logic
- images, err := findImagesInZip(zr)
- if err != nil {
- fmt.Printf("Error extracting images: %v", err)
- }
- fmt.Printf("Images: %v", images)
return textBody, nil
}
diff --git a/godo/office/office.go b/godo/office/office.go
new file mode 100644
index 0000000..1fe9803
--- /dev/null
+++ b/godo/office/office.go
@@ -0,0 +1,321 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+*/
+
+package office
+
+import (
+ "archive/zip"
+ "bufio"
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ pdf "godo/office/pdf"
+ xlsx "godo/office/xlsx"
+ "html"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
+func GetDocument(pathname string) (*Document, error) {
+ abPath, err := filepath.Abs(pathname)
+ if err != nil {
+ return nil, err
+ }
+ filename := path.Base(pathname)
+ data := Document{path: pathname, RePath: abPath, Title: filename}
+ extension := path.Ext(pathname)
+ _, err = getFileInfoData(&data)
+ if err != nil {
+ return &data, err
+ }
+ switch extension {
+ case ".docx":
+ _, e := getMetaData(&data)
+ if e != nil {
+ fmt.Printf("⚠️ %s", e.Error())
+ }
+ _, err = getContentData(&data, docx2txt)
+ case ".pptx":
+ _, e := getMetaData(&data)
+ if e != nil {
+ fmt.Printf("⚠️ %s", e.Error())
+ }
+ _, err = getContentData(&data, pptx2txt)
+ case ".xlsx":
+ _, e := getMetaData(&data)
+ if e != nil {
+ fmt.Printf("⚠️ %s", e.Error())
+ }
+ _, err = getContentData(&data, xlsx2txt)
+ case ".pdf":
+ _, err = getContentData(&data, pdf2txt)
+ case ".doc":
+ _, err = getContentData(&data, doc2txt)
+ case ".ppt":
+ _, err = getContentData(&data, ppt2txt)
+ case ".xls":
+ _, err = getContentData(&data, xls2txt)
+ case ".epub":
+ _, err = getContentData(&data, epub2txt)
+ case ".odt":
+ _, err = getContentData(&data, odt2txt)
+ case ".xml":
+ _, err = getContentData(&data, xml2txt)
+ case ".rtf":
+ _, err = getContentData(&data, rtf2txt)
+ case ".md":
+ _, err = getContentData(&data, md2txt)
+ case ".txt":
+ _, err = getContentData(&data, text2txt)
+ case ".xhtml", ".html", ".htm":
+ _, err = getContentData(&data, html2txt)
+ case ".json":
+ _, err = getContentData(&data, json2txt)
+ }
+ if err != nil {
+ return &data, err
+ }
+ return &data, nil
+}
+
+// Read the meta data of office files (only *.docx, *.xlsx, *.pptx) and insert into the interface
+func getMetaData(data *Document) (bool, error) {
+ file, err := os.Open(data.path)
+ if err != nil {
+ return false, err
+ }
+ defer file.Close()
+ meta, err := GetContent(file)
+ if err != nil {
+ return false, errors.New("failed to get office meta data")
+ }
+ if meta.Title != "" {
+ data.Title = meta.Title
+ }
+ data.Subject = meta.Subject
+ data.Creator = meta.Creator
+ data.Keywords = meta.Keywords
+ data.Description = meta.Description
+ data.Lastmodifiedby = meta.LastModifiedBy
+ data.Revision = meta.Revision
+ data.Category = meta.Category
+ data.Content = meta.Category
+ return true, nil
+}
+func GetContent(document *os.File) (fields XMLContent, err error) {
+ // Attempt to read the document file directly as a zip file.
+ z, err := zip.OpenReader(document.Name())
+ if err != nil {
+ return fields, errors.New("failed to open the file as zip")
+ }
+ defer z.Close()
+
+ var xmlFile string
+ for _, file := range z.File {
+ if file.Name == "docProps/core.xml" {
+ rc, err := file.Open()
+ if err != nil {
+ return fields, errors.New("failed to open docProps/core.xml")
+ }
+ defer rc.Close()
+
+ scanner := bufio.NewScanner(rc)
+ for scanner.Scan() {
+ xmlFile += scanner.Text()
+ }
+ if err := scanner.Err(); err != nil {
+ return fields, errors.New("failed to read from docProps/core.xml")
+ }
+ break // Exit loop after finding and reading core.xml
+ }
+ }
+
+ // Unmarshal the collected XML content into the XMLContent struct
+ if err := xml.Unmarshal([]byte(xmlFile), &fields); err != nil {
+ return fields, errors.New("failed to Unmarshal")
+ }
+
+ return fields, nil
+}
+
+// Read the content of office files and insert into the interface
+func getContentData(data *Document, reader DocReader) (bool, error) {
+ content, err := reader(data.path)
+ if err != nil {
+ return false, err
+ }
+ data.Content = content
+ return true, nil
+}
+
+// Read the file information of any files and insert into the interface
+
+func removeStrangeChars(input string) string {
+ // Define the regex pattern for allowed characters
+ re := regexp.MustCompile("[�\x13\x0b]+")
+ // Replace all disallowed characters with an empty string
+ return re.ReplaceAllString(input, " ")
+}
+
+func docx2txt(filename string) (string, error) {
+ data_docx, err := ReadDocxFile(filename) // Read data from docx file
+ if err != nil {
+ return "", err
+ }
+ defer data_docx.Close()
+ text_docx := data_docx.Editable().GetContent() // Get whole docx data as XML formatted text
+ text_docx = PARA_RE.ReplaceAllString(text_docx, "\n") // Replace the end of paragraphs with newlines
+ if len(text_row) > 0 {
+ text_row = fmt.Sprintf("%s\t%s", text_row, col.Value)
+ } else {
+ text_row = fmt.Sprintf("%s%s", text_row, col.Value)
+ }
+ }
+ if rows_xlsx != "" { // Save all rows as ONE string
+ rows_xlsx = fmt.Sprintf("%s\n%s", rows_xlsx, text_row)
+ } else {
+ rows_xlsx = fmt.Sprintf("%s%s", rows_xlsx, text_row)
+ }
+ }
+ }
+ // fmt.Println(rows_xlsx)
+ return rows_xlsx, nil
+}
+
+func pdf2txt(filename string) (string, error) { // BUG: Cannot get text from specific (or really malformed?) pages
+ file_pdf, data_pdf, err := pdf.Open(filename) // Read data from pdf file
+ if err != nil {
+ return "", err
+ }
+ defer file_pdf.Close()
+
+ var buff_pdf bytes.Buffer
+ bytes_pdf, err := data_pdf.GetPlainText() // Get text of entire pdf file
+ if err != nil {
+ return "", err
+ }
+
+ buff_pdf.ReadFrom(bytes_pdf)
+ text_pdf := buff_pdf.String()
+ // fmt.Println(text_pdf)
+ return text_pdf, nil
+}
+
+func doc2txt(filename string) (string, error) {
+ file_doc, err := os.Open(filename) // Open doc file
+ if err != nil {
+ return "", err
+ }
+ defer file_doc.Close()
+ data_doc, err := DOC2Text(file_doc) // Read data from a doc file
+ if err != nil {
+ return "", err
+ }
+
+ actual := data_doc.(*bytes.Buffer) // Buffer for hold line text of doc file
+ text_doc := ""
+ for aline, err := actual.ReadString('\r'); err == nil; aline, err = actual.ReadString('\r') { // Get text by line
+ aline = strings.Trim(aline, " \n\r")
+ if aline != "" {
+ if text_doc != "" {
+ text_doc = fmt.Sprintf("%s\n%s", text_doc, removeStrangeChars(aline))
+ } else {
+ text_doc = fmt.Sprintf("%s%s", text_doc, removeStrangeChars(aline))
+ }
+ }
+ }
+ text_doc = removeStrangeChars(text_doc)
+ // fmt.Println(text_doc)
+ return text_doc, nil
+}
+
+func ppt2txt(filename string) (string, error) {
+ file_ppt, err := os.Open(filename) // Open ppt file
+ if err != nil {
+ return "", err
+ }
+ defer file_ppt.Close()
+
+ text_ppt, err := ExtractPPTText(file_ppt) // Read text from a ppt file
+ if err != nil {
+ return "", err
+ }
+ text_ppt = removeStrangeChars(text_ppt)
+ // fmt.Println(text_ppt)
+ return text_ppt, nil
+}
+
+func xls2txt(filename string) (string, error) {
+ file_xls, err := os.Open(filename) // Open xls file
+ if err != nil {
+ return "", err
+ }
+ defer file_xls.Close()
+
+ text_xls, err := XLS2Text(file_xls) // Convert xls data to an array of rows (include all sheets)
+ if err != nil {
+ return "", err
+ }
+ text_xls = removeStrangeChars(text_xls)
+ // fmt.Println(text_xls)
+ return text_xls, nil
+}
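A minimal sketch of calling the dispatcher defined above from another package; the file path is invented and only fields visible in this hunk (Title, Content) are used.

    package main

    import (
        "fmt"
        "log"

        "godo/office"
    )

    func main() {
        doc, err := office.GetDocument("./samples/report.docx")
        if err != nil {
            log.Printf("extraction incomplete: %v", err)
        }
        if doc != nil {
            fmt.Println("title:", doc.Title)
            fmt.Println("chars:", len(doc.Content))
        }
    }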
diff --git a/godo/office/ole2/LICENSE b/godo/office/ole2/LICENSE
new file mode 100644
index 0000000..ad410e1
--- /dev/null
+++ b/godo/office/ole2/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/godo/office/ole2/README.md b/godo/office/ole2/README.md
new file mode 100644
index 0000000..b8f03e0
--- /dev/null
+++ b/godo/office/ole2/README.md
@@ -0,0 +1,2 @@
+# ole2
+Microsoft Compound Document File Format library in Golang
diff --git a/godo/office/ole2/dir.go b/godo/office/ole2/dir.go
new file mode 100644
index 0000000..d6cc833
--- /dev/null
+++ b/godo/office/ole2/dir.go
@@ -0,0 +1,35 @@
+package ole2
+
+import (
+ "unicode/utf16"
+)
+
+const (
+ EMPTY = iota
+ USERSTORAGE = iota
+ USERSTREAM = iota
+ LOCKBYTES = iota
+ PROPERTY = iota
+ ROOT = iota
+)
+
+type File struct {
+ NameBts [32]uint16
+ Bsize uint16
+ Type byte
+ Flag byte
+ Left uint32
+ Right uint32
+ Child uint32
+ Guid [8]uint16
+ Userflags uint32
+ Time [2]uint64
+ Sstart uint32
+ Size uint32
+ Proptype uint32
+}
+
+func (d *File) Name() string {
+ runes := utf16.Decode(d.NameBts[:d.Bsize/2-1])
+ return string(runes)
+}
diff --git a/godo/office/ole2/header.go b/godo/office/ole2/header.go
new file mode 100644
index 0000000..d2ca47a
--- /dev/null
+++ b/godo/office/ole2/header.go
@@ -0,0 +1,42 @@
+package ole2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+)
+
+type Header struct {
+ Id [2]uint32
+ Clid [4]uint32
+ Verminor uint16
+ Verdll uint16
+ Byteorder uint16
+ Lsectorb uint16
+ Lssectorb uint16
+ _ uint16
+ _ uint64
+
+ Cfat uint32 //Total number of sectors used for the sector allocation table
+ Dirstart uint32 //SecID of first sector of the directory stream
+
+ _ uint32
+
+ Sectorcutoff uint32 //Minimum size of a standard stream
+ Sfatstart uint32 //SecID of first sector of the short-sector allocation table
+ Csfat uint32 //Total number of sectors used for the short-sector allocation table
+ Difstart uint32 //SecID of first sector of the master sector allocation table
+ Cdif uint32 //Total number of sectors used for the master sector allocation table
+ Msat [109]uint32
+}
+
+func parseHeader(bts []byte) (*Header, error) {
+ buf := bytes.NewBuffer(bts)
+ header := new(Header)
+ binary.Read(buf, binary.LittleEndian, header)
+ if header.Id[0] != 0xE011CFD0 || header.Id[1] != 0xE11AB1A1 || header.Byteorder != 0xFFFE {
+ return nil, fmt.Errorf("not an excel file")
+ }
+
+ return header, nil
+}
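Package-internal sketch of how the header check is exercised by hand (the Open function in ole.go does this automatically); the file name is an assumption.

    f, err := os.Open("workbook.xls")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    hbts := make([]byte, 512)
    if _, err := io.ReadFull(f, hbts); err != nil {
        log.Fatal(err)
    }
    h, err := parseHeader(hbts)
    if err != nil {
        log.Fatal(err) // not an OLE2 compound document
    }
    fmt.Println("directory starts at SecID", h.Dirstart)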
diff --git a/godo/office/ole2/ole.go b/godo/office/ole2/ole.go
new file mode 100644
index 0000000..9a2f96a
--- /dev/null
+++ b/godo/office/ole2/ole.go
@@ -0,0 +1,156 @@
+package ole2
+
+import (
+ "encoding/binary"
+ "io"
+)
+
+var ENDOFCHAIN = uint32(0xFFFFFFFE) //-2
+var FREESECT = uint32(0xFFFFFFFF) // -1
+
+type Ole struct {
+ header *Header
+ Lsector uint32
+ Lssector uint32
+ SecID []uint32
+ SSecID []uint32
+ Files []File
+ reader io.ReadSeeker
+}
+
+func Open(reader io.ReadSeeker, charset string) (ole *Ole, err error) {
+ var header *Header
+ var hbts = make([]byte, 512)
+ reader.Read(hbts)
+ if header, err = parseHeader(hbts); err == nil {
+ ole = new(Ole)
+ ole.reader = reader
+ ole.header = header
+ ole.Lsector = 512 //TODO
+ ole.Lssector = 64 //TODO
+ err = ole.readMSAT()
+ return ole, err
+ }
+
+ return nil, err
+}
+
+func (o *Ole) ListDir() (dir []*File, err error) {
+ sector := o.stream_read(o.header.Dirstart, 0)
+ dir = make([]*File, 0)
+ for {
+ d := new(File)
+ err = binary.Read(sector, binary.LittleEndian, d)
+ if err == nil && d.Type != EMPTY {
+ dir = append(dir, d)
+ } else {
+ break
+ }
+ }
+ if err == io.EOF && dir != nil {
+ return dir, nil
+ }
+
+ return
+}
+
+func (o *Ole) OpenFile(file *File, root *File) io.ReadSeeker {
+ if file.Size < o.header.Sectorcutoff {
+ return o.short_stream_read(file.Sstart, file.Size, root.Sstart)
+ } else {
+ return o.stream_read(file.Sstart, file.Size)
+ }
+}
+
+// Read MSAT
+func (o *Ole) readMSAT() error {
+ // int sectorNum;
+
+ count := uint32(109)
+ if o.header.Cfat < 109 {
+ count = o.header.Cfat
+ }
+
+ for i := uint32(0); i < count; i++ {
+ if sector, err := o.sector_read(o.header.Msat[i]); err == nil {
+ sids := sector.AllValues(o.Lsector)
+ o.SecID = append(o.SecID, sids...)
+ } else {
+ return err
+ }
+ }
+
+ for sid := o.header.Difstart; sid != ENDOFCHAIN; {
+ if sector, err := o.sector_read(sid); err == nil {
+ sids := sector.MsatValues(o.Lsector)
+
+ for _, sid := range sids {
+ if sector, err := o.sector_read(sid); err == nil {
+ sids := sector.AllValues(o.Lsector)
+
+ o.SecID = append(o.SecID, sids...)
+ } else {
+ return err
+ }
+ }
+
+ sid = sector.NextSid(o.Lsector)
+ } else {
+ return err
+ }
+ }
+
+ for i := uint32(0); i < o.header.Csfat; i++ {
+ sid := o.header.Sfatstart
+
+ if sid != ENDOFCHAIN {
+ if sector, err := o.sector_read(sid); err == nil {
+ sids := sector.MsatValues(o.Lsector)
+
+ o.SSecID = append(o.SSecID, sids...)
+
+ sid = sector.NextSid(o.Lsector)
+ } else {
+ return err
+ }
+ }
+ }
+ return nil
+
+}
+
+func (o *Ole) stream_read(sid uint32, size uint32) *StreamReader {
+ return &StreamReader{o.SecID, sid, o.reader, sid, 0, o.Lsector, int64(size), 0, sector_pos}
+}
+
+func (o *Ole) short_stream_read(sid uint32, size uint32, startSecId uint32) *StreamReader {
+ ssatReader := &StreamReader{o.SecID, startSecId, o.reader, sid, 0, o.Lsector, int64(uint32(len(o.SSecID)) * o.Lssector), 0, sector_pos}
+ return &StreamReader{o.SSecID, sid, ssatReader, sid, 0, o.Lssector, int64(size), 0, short_sector_pos}
+}
+
+func (o *Ole) sector_read(sid uint32) (Sector, error) {
+ return o.sector_read_internal(sid, o.Lsector)
+}
+
+func (o *Ole) short_sector_read(sid uint32) (Sector, error) {
+ return o.sector_read_internal(sid, o.Lssector)
+}
+
+func (o *Ole) sector_read_internal(sid, size uint32) (Sector, error) {
+ pos := sector_pos(sid, size)
+ if _, err := o.reader.Seek(int64(pos), 0); err == nil {
+ var bts = make([]byte, size)
+ o.reader.Read(bts)
+ return Sector(bts), nil
+ } else {
+ return nil, err
+ }
+}
+
+func sector_pos(sid uint32, size uint32) uint32 {
+ return 512 + sid*size
+}
+
+func short_sector_pos(sid uint32, size uint32) uint32 {
+ return sid * size
+}
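The typical workflow implied by this API: open the compound document, list its directory, then open a named stream relative to the root entry. This is an illustrative sketch only; the file name and the "Workbook"/"Book" stream names are assumptions.

    f, err := os.Open("workbook.xls")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    o, err := ole2.Open(f, "utf-8")
    if err != nil {
        log.Fatal(err)
    }
    dir, err := o.ListDir()
    if err != nil {
        log.Fatal(err)
    }

    var book, root *ole2.File
    for _, d := range dir {
        switch {
        case d.Type == ole2.ROOT:
            root = d
        case d.Name() == "Workbook" || d.Name() == "Book":
            book = d
        }
    }
    if book != nil && root != nil {
        r := o.OpenFile(book, root) // io.ReadSeeker over the stream's bytes
        _ = r
    }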
diff --git a/godo/office/ole2/pss.go b/godo/office/ole2/pss.go
new file mode 100644
index 0000000..ac6e54e
--- /dev/null
+++ b/godo/office/ole2/pss.go
@@ -0,0 +1,19 @@
+package ole2
+
+type PSS struct {
+ name [64]byte
+ bsize uint16
+ typ byte
+ flag byte
+ left uint32
+ right uint32
+ child uint32
+ guid [16]uint16
+ userflags uint32
+ time [2]uint64
+ sstart uint32
+ size uint32
+ _ uint32
+}
diff --git a/godo/office/ole2/sector.go b/godo/office/ole2/sector.go
new file mode 100644
index 0000000..ad9f33d
--- /dev/null
+++ b/godo/office/ole2/sector.go
@@ -0,0 +1,37 @@
+package ole2
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+type Sector []byte
+
+func (s *Sector) Uint32(bit uint32) uint32 {
+ return binary.LittleEndian.Uint32((*s)[bit : bit+4])
+}
+
+func (s *Sector) NextSid(size uint32) uint32 {
+ return s.Uint32(size - 4)
+}
+
+func (s *Sector) MsatValues(size uint32) []uint32 {
+
+ return s.values(size, int(size/4-1))
+}
+
+func (s *Sector) AllValues(size uint32) []uint32 {
+
+ return s.values(size, int(size/4))
+}
+
+func (s *Sector) values(size uint32, length int) []uint32 {
+
+ var res = make([]uint32, length)
+
+ buf := bytes.NewBuffer((*s))
+
+ binary.Read(buf, binary.LittleEndian, res)
+
+ return res
+}
diff --git a/godo/office/ole2/stream.go b/godo/office/ole2/stream.go
new file mode 100644
index 0000000..5c52765
--- /dev/null
+++ b/godo/office/ole2/stream.go
@@ -0,0 +1,13 @@
+package ole2
+
+type Stream struct {
+ Ole *Ole
+ Start uint32
+ Pos uint32
+ Cfat int
+ Size int
+ Fatpos uint32
+ Bufsize uint32
+ Eof byte
+ Sfat bool
+}
diff --git a/godo/office/ole2/stream_reader.go b/godo/office/ole2/stream_reader.go
new file mode 100644
index 0000000..28a9562
--- /dev/null
+++ b/godo/office/ole2/stream_reader.go
@@ -0,0 +1,96 @@
+package ole2
+
+import (
+ "io"
+ "log"
+)
+
+var DEBUG = false
+
+type StreamReader struct {
+ sat []uint32
+ start uint32
+ reader io.ReadSeeker
+ offset_of_sector uint32
+ offset_in_sector uint32
+ size_sector uint32
+ size int64
+ offset int64
+ sector_pos func(uint32, uint32) uint32
+}
+
+func (r *StreamReader) Read(p []byte) (n int, err error) {
+	if r.offset_of_sector == ENDOFCHAIN {
+		return 0, io.EOF
+	}
+	pos := r.sector_pos(r.offset_of_sector, r.size_sector) + r.offset_in_sector
+	if _, err := r.reader.Seek(int64(pos), 0); err != nil {
+		return 0, err
+	}
+	read := uint32(0)
+	// Read sector by sector until the rest of the request fits inside the current sector.
+	for remain := uint32(len(p)) - read; remain > r.size_sector-r.offset_in_sector; remain = uint32(len(p)) - read {
+		nn, rerr := r.reader.Read(p[read : read+r.size_sector-r.offset_in_sector])
+		read += uint32(nn)
+		if rerr != nil {
+			return int(read), rerr
+		}
+		r.offset_in_sector = 0
+		if r.offset_of_sector >= uint32(len(r.sat)) {
+			// Broken sector chain: the current sector ID is outside the allocation table.
+			log.Printf("ole2: sector %d is outside the sector allocation table (len %d)", r.offset_of_sector, len(r.sat))
+			return int(read), io.EOF
+		}
+		r.offset_of_sector = r.sat[r.offset_of_sector]
+		if r.offset_of_sector == ENDOFCHAIN {
+			return int(read), io.EOF
+		}
+		pos := r.sector_pos(r.offset_of_sector, r.size_sector) + r.offset_in_sector
+		if _, err := r.reader.Seek(int64(pos), 0); err != nil {
+			return int(read), err
+		}
+	}
+	nn, rerr := r.reader.Read(p[read:])
+	if rerr != nil {
+		return int(read) + nn, rerr
+	}
+	read += uint32(nn)
+	r.offset_in_sector += uint32(nn)
+	if DEBUG {
+		log.Printf("pos:%x,bit:% X", r.offset_of_sector, p)
+	}
+	return int(read), nil
+}
+
+// Seek supports io.SeekStart (0) and treats any other whence value as
+// io.SeekCurrent; seeking relative to the end of the stream is not supported.
+func (r *StreamReader) Seek(offset int64, whence int) (int64, error) {
+	if whence == 0 {
+		r.offset_of_sector = r.start
+		r.offset_in_sector = 0
+		r.offset = offset
+	} else {
+		r.offset += offset
+	}
+
+	if r.offset_of_sector == ENDOFCHAIN {
+		return r.offset, io.EOF
+	}
+
+	// Walk the sector chain until the target offset falls inside the current sector.
+	for offset >= int64(r.size_sector-r.offset_in_sector) {
+		if r.offset_of_sector >= uint32(len(r.sat)) {
+			// Broken sector chain: give up instead of indexing out of range.
+			return r.offset, io.EOF
+		}
+		r.offset_of_sector = r.sat[r.offset_of_sector]
+		offset -= int64(r.size_sector - r.offset_in_sector)
+		r.offset_in_sector = 0
+		if r.offset_of_sector == ENDOFCHAIN {
+			return r.offset, io.EOF
+		}
+	}
+
+	if r.size <= r.offset {
+		r.offset = r.size
+		return r.offset, io.EOF
+	}
+	r.offset_in_sector += uint32(offset)
+	return r.offset, nil
+}
diff --git a/godo/office/ole2/stream_reader_test.go b/godo/office/ole2/stream_reader_test.go
new file mode 100644
index 0000000..8cadc15
--- /dev/null
+++ b/godo/office/ole2/stream_reader_test.go
@@ -0,0 +1,75 @@
+package ole2
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+)
+
+func TestRead(t *testing.T) {
+ bts := make([]byte, 1<<10)
+ for i := 0; i < 1<<10; i++ {
+ bts[i] = byte(i)
+ }
+ ole := &Ole{nil, 8, 1, []uint32{2, 1, ENDOFCHAIN}, []uint32{}, []File{}, bytes.NewReader(bts)}
+ r := ole.stream_read(0, 30)
+ res := make([]byte, 14)
+ fmt.Println(r.Read(res))
+ fmt.Println(res)
+}
+
+func TestSeek(t *testing.T) {
+	bts := make([]byte, 1<<10)
+	for i := 0; i < 1<<10; i++ {
+		bts[i] = byte(i)
+	}
+	ole := &Ole{nil, 8, 1, []uint32{2, 1, ENDOFCHAIN}, []uint32{}, []File{}, bytes.NewReader(bts)}
+	r := ole.stream_read(0, 30)
+	for i := 0; i < 19; i++ {
+		fmt.Println(r.Seek(2, 1))
+	}
+}
+
+func TestSeek1(t *testing.T) {
+	bts := make([]byte, 1<<10)
+	for i := 0; i < 1<<10; i++ {
+		bts[i] = byte(i)
+	}
+	ole := &Ole{nil, 8, 1, []uint32{2, 1, ENDOFCHAIN}, []uint32{}, []File{}, bytes.NewReader(bts)}
+	r := ole.stream_read(0, 30)
+	for i := 0; i < 19; i++ {
+		fmt.Println(r.Seek(2, 1))
+	}
+}
diff --git a/godo/office/pdf/README.md b/godo/office/pdf/README.md
new file mode 100644
index 0000000..5d72176
--- /dev/null
+++ b/godo/office/pdf/README.md
@@ -0,0 +1,138 @@
+# PDF Reader
+
+
+A simple Go library for reading PDF files, forked from https://github.com/rsc/pdf.
+
+## Features
+
+- Get plain text content (without format)
+- Get content (including all font and formatting information)
+
+## Install
+
+`go get -u github.com/ledongthuc/pdf`
+
+
+## Read plain text
+
+```golang
+package main
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/ledongthuc/pdf"
+)
+
+func main() {
+	pdf.DebugOn = true
+	content, err := readPdf("test.pdf") // Read local pdf file
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(content)
+}
+
+func readPdf(path string) (string, error) {
+	f, r, err := pdf.Open(path)
+	if err != nil {
+		return "", err
+	}
+	// remember to close the file
+	defer f.Close()
+	var buf bytes.Buffer
+	b, err := r.GetPlainText()
+	if err != nil {
+		return "", err
+	}
+	buf.ReadFrom(b)
+	return buf.String(), nil
+}
+```
+
+## Read all text with styles from PDF
+
+```golang
+func readPdf2(path string) (string, error) {
+	f, r, err := pdf.Open(path)
+	if err != nil {
+		return "", err
+	}
+	// remember to close the file
+	defer f.Close()
+	totalPage := r.NumPage()
+
+	for pageIndex := 1; pageIndex <= totalPage; pageIndex++ {
+		p := r.Page(pageIndex)
+		if p.V.IsNull() {
+			continue
+		}
+		var lastTextStyle pdf.Text
+		texts := p.Content().Text
+		for _, text := range texts {
+			if isSameSentence(text, lastTextStyle) {
+				lastTextStyle.S = lastTextStyle.S + text.S
+			} else {
+				fmt.Printf("Font: %s, Font-size: %f, x: %f, y: %f, content: %s \n", lastTextStyle.Font, lastTextStyle.FontSize, lastTextStyle.X, lastTextStyle.Y, lastTextStyle.S)
+				lastTextStyle = text
+			}
+		}
+	}
+	return "", nil
+}
+```
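+
+`isSameSentence` is not provided by the library; it is a small helper you define yourself to decide when two text fragments belong to the same run. A minimal sketch, assuming fragments that share font, size and Y coordinate belong together, could look like this:
+
+```golang
+func isSameSentence(current, last pdf.Text) bool {
+	// Treat fragments as one run when font, size and baseline match exactly;
+	// real documents may need a small tolerance on the Y comparison.
+	return current.Font == last.Font &&
+		current.FontSize == last.FontSize &&
+		current.Y == last.Y
+}
+```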
+
+
+## Read text grouped by rows
+
+```golang
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/ledongthuc/pdf"
+)
+
+func main() {
+	content, err := readPdf(os.Args[1]) // Read local pdf file
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(content)
+}
+
+func readPdf(path string) (string, error) {
+	f, r, err := pdf.Open(path)
+	if err != nil {
+		return "", err
+	}
+	defer func() {
+		_ = f.Close()
+	}()
+	totalPage := r.NumPage()
+
+	for pageIndex := 1; pageIndex <= totalPage; pageIndex++ {
+		p := r.Page(pageIndex)
+		if p.V.IsNull() {
+			continue
+		}
+
+		rows, _ := p.GetTextByRow()
+		for _, row := range rows {
+			fmt.Println(">>>> row: ", row.Position)
+			for _, word := range row.Content {
+				fmt.Println(word.S)
+			}
+		}
+	}
+	return "", nil
+}
+```
+
+## Demo
+
diff --git a/godo/office/pdf/ascii85.go b/godo/office/pdf/ascii85.go
new file mode 100644
index 0000000..7d4b4a3
--- /dev/null
+++ b/godo/office/pdf/ascii85.go
@@ -0,0 +1,49 @@
+package pdf
+
+import (
+ "io"
+)
+
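+// alphaReader filters an ASCII85-encoded stream: bytes outside the ASCII85
+// alphabet, and everything after the "~>" end marker, are replaced with NUL bytes.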
+type alphaReader struct {
+ reader io.Reader
+}
+
+func newAlphaReader(reader io.Reader) *alphaReader {
+ return &alphaReader{reader: reader}
+}
+
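+// checkASCII85 returns r itself when r is a valid ASCII85 character ('!'..'u'),
+// 1 when r is '~' (possible start of the end-of-data marker), and 0 otherwise.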
+func checkASCII85(r byte) byte {
+ if r >= '!' && r <= 'u' { // 33 <= ascii85 <=117
+ return r
+ }
+ if r == '~' {
+ return 1 // for marking possible end of data
+ }
+ return 0 // if non-ascii85
+}
+
+func (a *alphaReader) Read(p []byte) (int, error) {
+	n, err := a.reader.Read(p)
+	if n == 0 {
+		return n, err
+	}
+	// Filter whatever was read, even when it arrives together with io.EOF.
+	buf := make([]byte, n)
+	tilde := false
+	for i := 0; i < n; i++ {
+		char := checkASCII85(p[i])
+		if char == '>' && tilde { // end of data
+			break
+		}
+		if char > 1 {
+			buf[i] = char
+		}
+		if char == 1 {
+			tilde = true // possible end of data
+		}
+	}
+
+	copy(p, buf)
+	return n, err
+}
diff --git a/godo/office/pdf/lex.go b/godo/office/pdf/lex.go
new file mode 100644
index 0000000..c3136c5
--- /dev/null
+++ b/godo/office/pdf/lex.go
@@ -0,0 +1,522 @@
+package pdf
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// A token is a PDF token in the input stream, one of the following Go types:
+//
+// bool, a PDF boolean
+// int64, a PDF integer
+// float64, a PDF real
+// string, a PDF string literal
+// keyword, a PDF keyword
+// name, a PDF name without the leading slash
+type token interface{}
+
+// A name is a PDF name, without the leading slash.
+type name string
+
+// A keyword is a PDF keyword.
+// Delimiter tokens used in higher-level syntax,
+// such as "<<", ">>", "[", "]", "{", "}", are also treated as keywords.
+type keyword string
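+
+// For example, the input fragment `/Name 12 3.4 (text) true <<`
+// lexes into the tokens name("Name"), int64(12), float64(3.4),
+// "text", true, and keyword("<<").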
+
+// A buffer holds buffered input bytes from the PDF file.
+type buffer struct {
+ r io.Reader // source of data
+ buf []byte // buffered data
+ pos int // read index in buf
+ offset int64 // offset at end of buf; aka offset of next read
+ tmp []byte // scratch space for accumulating token
+ unread []token // queue of read but then unread tokens
+ allowEOF bool
+ allowObjptr bool
+ allowStream bool
+ eof bool
+ key []byte
+ useAES bool
+ objptr objptr
+}
+
+// newBuffer returns a new buffer reading from r at the given offset.
+func newBuffer(r io.Reader, offset int64) *buffer {
+ return &buffer{
+ r: r,
+ offset: offset,
+ buf: make([]byte, 0, 4096),
+ allowObjptr: true,
+ allowStream: true,
+ }
+}
+
+func (b *buffer) seek(offset int64) {
+ b.offset = offset
+ b.buf = b.buf[:0]
+ b.pos = 0
+ b.unread = b.unread[:0]
+}
+
+func (b *buffer) readByte() byte {
+ if b.pos >= len(b.buf) {
+ b.reload()
+ if b.pos >= len(b.buf) {
+ return '\n'
+ }
+ }
+ c := b.buf[b.pos]
+ b.pos++
+ return c
+}
+
+func (b *buffer) errorf(format string, args ...interface{}) {
+ panic(fmt.Errorf(format, args...))
+}
+
+func (b *buffer) reload() bool {
+ n := cap(b.buf) - int(b.offset%int64(cap(b.buf)))
+ n, err := b.r.Read(b.buf[:n])
+ if n == 0 && err != nil {
+ b.buf = b.buf[:0]
+ b.pos = 0
+ if b.allowEOF && err == io.EOF {
+ b.eof = true
+ return false
+ }
+ b.errorf("malformed PDF: reading at offset %d: %v", b.offset, err)
+ return false
+ }
+ b.offset += int64(n)
+ b.buf = b.buf[:n]
+ b.pos = 0
+ return true
+}
+
+func (b *buffer) seekForward(offset int64) {
+ for b.offset < offset {
+ if !b.reload() {
+ return
+ }
+ }
+ b.pos = len(b.buf) - int(b.offset-offset)
+}
+
+func (b *buffer) readOffset() int64 {
+ return b.offset - int64(len(b.buf)) + int64(b.pos)
+}
+
+func (b *buffer) unreadByte() {
+ if b.pos > 0 {
+ b.pos--
+ }
+}
+
+func (b *buffer) unreadToken(t token) {
+ b.unread = append(b.unread, t)
+}
+
+func (b *buffer) readToken() token {
+ if n := len(b.unread); n > 0 {
+ t := b.unread[n-1]
+ b.unread = b.unread[:n-1]
+ return t
+ }
+
+ // Find first non-space, non-comment byte.
+ c := b.readByte()
+ for {
+ if isSpace(c) {
+ if b.eof {
+ return io.EOF
+ }
+ c = b.readByte()
+ } else if c == '%' {
+ for c != '\r' && c != '\n' {
+ c = b.readByte()
+ }
+ } else {
+ break
+ }
+ }
+
+ switch c {
+ case '<':
+ if b.readByte() == '<' {
+ return keyword("<<")
+ }
+ b.unreadByte()
+ return b.readHexString()
+
+ case '(':
+ return b.readLiteralString()
+
+ case '[', ']', '{', '}':
+ return keyword(string(c))
+
+ case '/':
+ return b.readName()
+
+ case '>':
+ if b.readByte() == '>' {
+ return keyword(">>")
+ }
+ b.unreadByte()
+ fallthrough
+
+ default:
+ if isDelim(c) {
+ b.errorf("unexpected delimiter %#q", rune(c))
+ return nil
+ }
+ b.unreadByte()
+ return b.readKeyword()
+ }
+}
+
+func (b *buffer) readHexString() token {
+ tmp := b.tmp[:0]
+ for {
+ Loop:
+ c := b.readByte()
+ if c == '>' {
+ break
+ }
+ if isSpace(c) {
+ goto Loop
+ }
+ Loop2:
+ c2 := b.readByte()
+ if isSpace(c2) {
+ goto Loop2
+ }
+ x := unhex(c)<<4 | unhex(c2)
+ if x < 0 {
+ b.errorf("malformed hex string %c %c %s", c, c2, b.buf[b.pos:])
+ break
+ }
+ tmp = append(tmp, byte(x))
+ }
+ b.tmp = tmp
+ return string(tmp)
+}
+
+func unhex(b byte) int {
+ switch {
+ case '0' <= b && b <= '9':
+ return int(b) - '0'
+ case 'a' <= b && b <= 'f':
+ return int(b) - 'a' + 10
+ case 'A' <= b && b <= 'F':
+ return int(b) - 'A' + 10
+ }
+ return -1
+}
+
+func (b *buffer) readLiteralString() token {
+ tmp := b.tmp[:0]
+ depth := 1
+Loop:
+ for !b.eof {
+ c := b.readByte()
+ switch c {
+ default:
+ tmp = append(tmp, c)
+ case '(':
+ depth++
+ tmp = append(tmp, c)
+ case ')':
+ if depth--; depth == 0 {
+ break Loop
+ }
+ tmp = append(tmp, c)
+ case '\\':
+ switch c = b.readByte(); c {
+ default:
+ b.errorf("invalid escape sequence \\%c", c)
+ tmp = append(tmp, '\\', c)
+ case 'n':
+ tmp = append(tmp, '\n')
+ case 'r':
+ tmp = append(tmp, '\r')
+ case 'b':
+ tmp = append(tmp, '\b')
+ case 't':
+ tmp = append(tmp, '\t')
+ case 'f':
+ tmp = append(tmp, '\f')
+ case '(', ')', '\\':
+ tmp = append(tmp, c)
+ case '\r':
+ if b.readByte() != '\n' {
+ b.unreadByte()
+ }
+ fallthrough
+ case '\n':
+ // no append
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ x := int(c - '0')
+ for i := 0; i < 2; i++ {
+ c = b.readByte()
+ if c < '0' || c > '7' {
+ b.unreadByte()
+ break
+ }
+ x = x*8 + int(c-'0')
+ }
+ if x > 255 {
+ b.errorf("invalid octal escape \\%03o", x)
+ }
+ tmp = append(tmp, byte(x))
+ }
+ }
+ }
+ b.tmp = tmp
+ return string(tmp)
+}
+
+func (b *buffer) readName() token {
+ tmp := b.tmp[:0]
+ for {
+ c := b.readByte()
+ if isDelim(c) || isSpace(c) {
+ b.unreadByte()
+ break
+ }
+ if c == '#' {
+ x := unhex(b.readByte())<<4 | unhex(b.readByte())
+ if x < 0 {
+ b.errorf("malformed name")
+ }
+ tmp = append(tmp, byte(x))
+ continue
+ }
+ tmp = append(tmp, c)
+ }
+ b.tmp = tmp
+ return name(string(tmp))
+}
+
+func (b *buffer) readKeyword() token {
+ tmp := b.tmp[:0]
+ for {
+ c := b.readByte()
+ if isDelim(c) || isSpace(c) {
+ b.unreadByte()
+ break
+ }
+ tmp = append(tmp, c)
+ }
+ b.tmp = tmp
+ s := string(tmp)
+ switch {
+ case s == "true":
+ return true
+ case s == "false":
+ return false
+ case isInteger(s):
+ x, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ b.errorf("invalid integer %s", s)
+ }
+ return x
+ case isReal(s):
+ x, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ b.errorf("invalid real %s", s)
+ }
+ return x
+ }
+ return keyword(string(tmp))
+}
+
+func isInteger(s string) bool {
+ if len(s) > 0 && (s[0] == '+' || s[0] == '-') {
+ s = s[1:]
+ }
+ if len(s) == 0 {
+ return false
+ }
+ for _, c := range s {
+ if c < '0' || '9' < c {
+ return false
+ }
+ }
+ return true
+}
+
+func isReal(s string) bool {
+ if len(s) > 0 && (s[0] == '+' || s[0] == '-') {
+ s = s[1:]
+ }
+ if len(s) == 0 {
+ return false
+ }
+ ndot := 0
+ for _, c := range s {
+ if c == '.' {
+ ndot++
+ continue
+ }
+ if c < '0' || '9' < c {
+ return false
+ }
+ }
+ return ndot == 1
+}
+
+// An object is a PDF syntax object, one of the following Go types:
+//
+// bool, a PDF boolean
+// int64, a PDF integer
+// float64, a PDF real
+// string, a PDF string literal
+// name, a PDF name without the leading slash
+// dict, a PDF dictionary
+// array, a PDF array
+// stream, a PDF stream
+// objptr, a PDF object reference
+// objdef, a PDF object definition
+//
+// An object may also be nil, to represent the PDF null.
+type object interface{}
+
+type dict map[name]object
+
+type array []object
+
+type stream struct {
+ hdr dict
+ ptr objptr
+ offset int64
+}
+
+type objptr struct {
+ id uint32
+ gen uint16
+}
+
+type objdef struct {
+ ptr objptr
+ obj object
+}
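+
+// For example, `1 0 obj << /Length 42 >> endobj` is read as
+// objdef{ptr: objptr{1, 0}, obj: dict{"Length": int64(42)}},
+// while a bare reference `1 0 R` is read as objptr{1, 0}.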
+
+func (b *buffer) readObject() object {
+ tok := b.readToken()
+ if kw, ok := tok.(keyword); ok {
+ switch kw {
+ case "null":
+ return nil
+ case "<<":
+ return b.readDict()
+ case "[":
+ return b.readArray()
+ }
+ b.errorf("unexpected keyword %q parsing object", kw)
+ return nil
+ }
+
+ if str, ok := tok.(string); ok && b.key != nil && b.objptr.id != 0 {
+ tok = decryptString(b.key, b.useAES, b.objptr, str)
+ }
+
+ if !b.allowObjptr {
+ return tok
+ }
+
+ if t1, ok := tok.(int64); ok && int64(uint32(t1)) == t1 {
+ tok2 := b.readToken()
+ if t2, ok := tok2.(int64); ok && int64(uint16(t2)) == t2 {
+ tok3 := b.readToken()
+ switch tok3 {
+ case keyword("R"):
+ return objptr{uint32(t1), uint16(t2)}
+ case keyword("obj"):
+ old := b.objptr
+ b.objptr = objptr{uint32(t1), uint16(t2)}
+ obj := b.readObject()
+ if _, ok := obj.(stream); !ok {
+ tok4 := b.readToken()
+ if tok4 != keyword("endobj") {
+ b.errorf("missing endobj after indirect object definition")
+ b.unreadToken(tok4)
+ }
+ }
+ b.objptr = old
+ return objdef{objptr{uint32(t1), uint16(t2)}, obj}
+ }
+ b.unreadToken(tok3)
+ }
+ b.unreadToken(tok2)
+ }
+ return tok
+}
+
+func (b *buffer) readArray() object {
+ var x array
+ for {
+ tok := b.readToken()
+ if tok == nil || tok == keyword("]") {
+ break
+ }
+ b.unreadToken(tok)
+ x = append(x, b.readObject())
+ }
+ return x
+}
+
+func (b *buffer) readDict() object {
+ x := make(dict)
+ for {
+ tok := b.readToken()
+ if tok == nil || tok == keyword(">>") {
+ break
+ }
+ n, ok := tok.(name)
+ if !ok {
+ b.errorf("unexpected non-name key %T(%v) parsing dictionary", tok, tok)
+ continue
+ }
+ x[n] = b.readObject()
+ }
+
+ if !b.allowStream {
+ return x
+ }
+
+ tok := b.readToken()
+ if tok != keyword("stream") {
+ b.unreadToken(tok)
+ return x
+ }
+
+ switch b.readByte() {
+ case '\r':
+ if b.readByte() != '\n' {
+ b.unreadByte()
+ }
+ case '\n':
+ // ok
+ default:
+ b.errorf("stream keyword not followed by newline")
+ }
+
+ return stream{x, b.objptr, b.readOffset()}
+}
+
+func isSpace(b byte) bool {
+ switch b {
+ case '\x00', '\t', '\n', '\f', '\r', ' ':
+ return true
+ }
+ return false
+}
+
+func isDelim(b byte) bool {
+ switch b {
+ case '<', '>', '(', ')', '[', ']', '{', '}', '/', '%':
+ return true
+ }
+ return false
+}
diff --git a/godo/office/pdf/name.go b/godo/office/pdf/name.go
new file mode 100644
index 0000000..3825784
--- /dev/null
+++ b/godo/office/pdf/name.go
@@ -0,0 +1,4286 @@
+// Derived from http://www.jdawiseman.com/papers/trivia/character-entities.html
+
+package pdf
+
+var nameToRune = map[string]rune{
+ "nbspace": 0x00A0,
+ "nonbreakingspace": 0x00A0,
+ "exclamdown": 0x00A1,
+ "cent": 0x00A2,
+ "sterling": 0x00A3,
+ "currency": 0x00A4,
+ "yen": 0x00A5,
+ "brokenbar": 0x00A6,
+ "section": 0x00A7,
+ "dieresis": 0x00A8,
+ "copyright": 0x00A9,
+ "ordfeminine": 0x00AA,
+ "guillemotleft": 0x00AB,
+ "logicalnot": 0x00AC,
+ "sfthyphen": 0x00AD,
+ "softhyphen": 0x00AD,
+ "registered": 0x00AE,
+ "macron": 0x00AF,
+ "overscore": 0x00AF,
+ "degree": 0x00B0,
+ "plusminus": 0x00B1,
+ "twosuperior": 0x00B2,
+ "threesuperior": 0x00B3,
+ "acute": 0x00B4,
+ "mu": 0x00B5,
+ "mu1": 0x00B5,
+ "paragraph": 0x00B6,
+ "middot": 0x00B7,
+ "periodcentered": 0x00B7,
+ "cedilla": 0x00B8,
+ "onesuperior": 0x00B9,
+ "ordmasculine": 0x00BA,
+ "guillemotright": 0x00BB,
+ "onequarter": 0x00BC,
+ "onehalf": 0x00BD,
+ "threequarters": 0x00BE,
+ "questiondown": 0x00BF,
+ "Agrave": 0x00C0,
+ "Aacute": 0x00C1,
+ "Acircumflex": 0x00C2,
+ "Atilde": 0x00C3,
+ "Adieresis": 0x00C4,
+ "Aring": 0x00C5,
+ "AE": 0x00C6,
+ "Ccedilla": 0x00C7,
+ "Egrave": 0x00C8,
+ "Eacute": 0x00C9,
+ "Ecircumflex": 0x00CA,
+ "Edieresis": 0x00CB,
+ "Igrave": 0x00CC,
+ "Iacute": 0x00CD,
+ "Icircumflex": 0x00CE,
+ "Idieresis": 0x00CF,
+ "Eth": 0x00D0,
+ "Ntilde": 0x00D1,
+ "Ograve": 0x00D2,
+ "Oacute": 0x00D3,
+ "Ocircumflex": 0x00D4,
+ "Otilde": 0x00D5,
+ "Odieresis": 0x00D6,
+ "multiply": 0x00D7,
+ "Oslash": 0x00D8,
+ "Ugrave": 0x00D9,
+ "Uacute": 0x00DA,
+ "Ucircumflex": 0x00DB,
+ "Udieresis": 0x00DC,
+ "Yacute": 0x00DD,
+ "Thorn": 0x00DE,
+ "germandbls": 0x00DF,
+ "agrave": 0x00E0,
+ "aacute": 0x00E1,
+ "acircumflex": 0x00E2,
+ "atilde": 0x00E3,
+ "adieresis": 0x00E4,
+ "aring": 0x00E5,
+ "ae": 0x00E6,
+ "ccedilla": 0x00E7,
+ "egrave": 0x00E8,
+ "eacute": 0x00E9,
+ "ecircumflex": 0x00EA,
+ "edieresis": 0x00EB,
+ "igrave": 0x00EC,
+ "iacute": 0x00ED,
+ "icircumflex": 0x00EE,
+ "idieresis": 0x00EF,
+ "eth": 0x00F0,
+ "ntilde": 0x00F1,
+ "ograve": 0x00F2,
+ "oacute": 0x00F3,
+ "ocircumflex": 0x00F4,
+ "otilde": 0x00F5,
+ "odieresis": 0x00F6,
+ "divide": 0x00F7,
+ "oslash": 0x00F8,
+ "ugrave": 0x00F9,
+ "uacute": 0x00FA,
+ "ucircumflex": 0x00FB,
+ "udieresis": 0x00FC,
+ "yacute": 0x00FD,
+ "thorn": 0x00FE,
+ "ydieresis": 0x00FF,
+ "florin": 0x0192,
+ "Alpha": 0x0391,
+ "Beta": 0x0392,
+ "Gamma": 0x0393,
+ "Deltagreek": 0x0394,
+ "Epsilon": 0x0395,
+ "Zeta": 0x0396,
+ "Eta": 0x0397,
+ "Theta": 0x0398,
+ "Iota": 0x0399,
+ "Kappa": 0x039A,
+ "Lambda": 0x039B,
+ "Mu": 0x039C,
+ "Nu": 0x039D,
+ "Xi": 0x039E,
+ "Omicron": 0x039F,
+ "Pi": 0x03A0,
+ "Rho": 0x03A1,
+ "Sigma": 0x03A3,
+ "Tau": 0x03A4,
+ "Upsilon": 0x03A5,
+ "Phi": 0x03A6,
+ "Chi": 0x03A7,
+ "Psi": 0x03A8,
+ "Omegagreek": 0x03A9,
+ "alpha": 0x03B1,
+ "beta": 0x03B2,
+ "gamma": 0x03B3,
+ "delta": 0x03B4,
+ "epsilon": 0x03B5,
+ "zeta": 0x03B6,
+ "eta": 0x03B7,
+ "theta": 0x03B8,
+ "iota": 0x03B9,
+ "kappa": 0x03BA,
+ "lambda": 0x03BB,
+ "mugreek": 0x03BC,
+ "nu": 0x03BD,
+ "xi": 0x03BE,
+ "omicron": 0x03BF,
+ "pi": 0x03C0,
+ "rho": 0x03C1,
+ "sigma1": 0x03C2,
+ "sigmafinal": 0x03C2,
+ "sigma": 0x03C3,
+ "tau": 0x03C4,
+ "upsilon": 0x03C5,
+ "phi": 0x03C6,
+ "chi": 0x03C7,
+ "psi": 0x03C8,
+ "omega": 0x03C9,
+ "theta1": 0x03D1,
+ "thetasymbolgreek": 0x03D1,
+ "Upsilon1": 0x03D2,
+ "Upsilonhooksymbol": 0x03D2,
+ "omega1": 0x03D6,
+ "pisymbolgreek": 0x03D6,
+ "bullet": 0x2022,
+ "ellipsis": 0x2026,
+ "minute": 0x2032,
+ "second": 0x2033,
+ "overline": 0x203E,
+ "fraction": 0x2044,
+ "weierstrass": 0x2118,
+ "Ifraktur": 0x2111,
+ "Rfraktur": 0x211C,
+ "trademark": 0x2122,
+ "aleph": 0x2135,
+ "arrowleft": 0x2190,
+ "arrowup": 0x2191,
+ "arrowright": 0x2192,
+ "arrowdown": 0x2193,
+ "arrowboth": 0x2194,
+ "carriagereturn": 0x21B5,
+ "arrowdblleft": 0x21D0,
+ "arrowleftdbl": 0x21D0,
+ "arrowdblup": 0x21D1,
+ "arrowdblright": 0x21D2,
+ "dblarrowright": 0x21D2,
+ "arrowdbldown": 0x21D3,
+ "arrowdblboth": 0x21D4,
+ "dblarrowleft": 0x21D4,
+ "forall": 0x2200,
+ "universal": 0x2200,
+ "partialdiff": 0x2202,
+ "existential": 0x2203,
+ "thereexists": 0x2203,
+ "emptyset": 0x2205,
+ "gradient": 0x2207,
+ "nabla": 0x2207,
+ "element": 0x2208,
+ "notelement": 0x2209,
+ "notelementof": 0x2209,
+ "suchthat": 0x220B,
+ "product": 0x220F,
+ "summation": 0x2211,
+ "minus": 0x2212,
+ "asteriskmath": 0x2217,
+ "radical": 0x221A,
+ "proportional": 0x221D,
+ "infinity": 0x221E,
+ "angle": 0x2220,
+ "logicaland": 0x2227,
+ "logicalor": 0x2228,
+ "intersection": 0x2229,
+ "union": 0x222A,
+ "integral": 0x222B,
+ "therefore": 0x2234,
+ "similar": 0x223C,
+ "tildeoperator": 0x223C,
+ "approximatelyequal": 0x2245,
+ "congruent": 0x2245,
+ "approxequal": 0x2248,
+ "notequal": 0x2260,
+ "equivalence": 0x2261,
+ "lessequal": 0x2264,
+ "greaterequal": 0x2265,
+ "propersubset": 0x2282,
+ "subset": 0x2282,
+ "propersuperset": 0x2283,
+ "superset": 0x2283,
+ "notsubset": 0x2284,
+ "reflexsubset": 0x2286,
+ "subsetorequal": 0x2286,
+ "reflexsuperset": 0x2287,
+ "supersetorequal": 0x2287,
+ "circleplus": 0x2295,
+ "pluscircle": 0x2295,
+ "circlemultiply": 0x2297,
+ "timescircle": 0x2297,
+ "perpendicular": 0x22A5,
+ "dotmath": 0x22C5,
+ "angleleft": 0x2329,
+ "angleright": 0x232A,
+ "lozenge": 0x25CA,
+ "spade": 0x2660,
+ "spadesuitblack": 0x2660,
+ "club": 0x2663,
+ "clubsuitblack": 0x2663,
+ "heart": 0x2665,
+ "heartsuitblack": 0x2665,
+ "diamond": 0x2666,
+ "quotedbl": 0x0022,
+ "ampersand": 0x0026,
+ "less": 0x003C,
+ "greater": 0x003E,
+ "OE": 0x0152,
+ "oe": 0x0153,
+ "Scaron": 0x0160,
+ "scaron": 0x0161,
+ "Ydieresis": 0x0178,
+ "circumflex": 0x02C6,
+ "ilde": 0x02DC,
+ "tilde": 0x02DC,
+ "enspace": 0x2002,
+ "afii61664": 0x200C,
+ "zerowidthnonjoiner": 0x200C,
+ "afii301": 0x200D,
+ "afii299": 0x200E,
+ "afii300": 0x200F,
+ "endash": 0x2013,
+ "emdash": 0x2014,
+ "quoteleft": 0x2018,
+ "quoteright": 0x2019,
+ "quotesinglbase": 0x201A,
+ "quotedblleft": 0x201C,
+ "quotedblright": 0x201D,
+ "quotedblbase": 0x201E,
+ "dagger": 0x2020,
+ "daggerdbl": 0x2021,
+ "perthousand": 0x2030,
+ "guilsinglleft": 0x2039,
+ "guilsinglright": 0x203A,
+ "Euro": 0x20AC,
+ "controlSTX": 0x0001,
+ "controlSOT": 0x0002,
+ "controlETX": 0x0003,
+ "controlEOT": 0x0004,
+ "controlENQ": 0x0005,
+ "controlACK": 0x0006,
+ "controlBEL": 0x0007,
+ "controlBS": 0x0008,
+ "controlHT": 0x0009,
+ "controlLF": 0x000A,
+ "controlVT": 0x000B,
+ "controlFF": 0x000C,
+ "controlCR": 0x000D,
+ "controlSO": 0x000E,
+ "controlSI": 0x000F,
+ "controlDLE": 0x0010,
+ "controlDC1": 0x0011,
+ "controlDC2": 0x0012,
+ "controlDC3": 0x0013,
+ "controlDC4": 0x0014,
+ "controlNAK": 0x0015,
+ "controlSYN": 0x0016,
+ "controlETB": 0x0017,
+ "controlCAN": 0x0018,
+ "controlEM": 0x0019,
+ "controlSUB": 0x001A,
+ "controlESC": 0x001B,
+ "controlFS": 0x001C,
+ "controlGS": 0x001D,
+ "controlRS": 0x001E,
+ "controlUS": 0x001F,
+ "space": 0x0020,
+ "spacehackarabic": 0x0020,
+ "exclam": 0x0021,
+ "numbersign": 0x0023,
+ "dollar": 0x0024,
+ "percent": 0x0025,
+ "quotesingle": 0x0027,
+ "parenleft": 0x0028,
+ "parenright": 0x0029,
+ "asterisk": 0x002A,
+ "plus": 0x002B,
+ "comma": 0x002C,
+ "hyphen": 0x002D,
+ "period": 0x002E,
+ "slash": 0x002F,
+ "zero": 0x0030,
+ "one": 0x0031,
+ "two": 0x0032,
+ "three": 0x0033,
+ "four": 0x0034,
+ "five": 0x0035,
+ "six": 0x0036,
+ "seven": 0x0037,
+ "eight": 0x0038,
+ "nine": 0x0039,
+ "colon": 0x003A,
+ "semicolon": 0x003B,
+ "equal": 0x003D,
+ "question": 0x003F,
+ "at": 0x0040,
+ "A": 0x0041,
+ "B": 0x0042,
+ "C": 0x0043,
+ "D": 0x0044,
+ "E": 0x0045,
+ "F": 0x0046,
+ "G": 0x0047,
+ "H": 0x0048,
+ "I": 0x0049,
+ "J": 0x004A,
+ "K": 0x004B,
+ "L": 0x004C,
+ "M": 0x004D,
+ "N": 0x004E,
+ "O": 0x004F,
+ "P": 0x0050,
+ "Q": 0x0051,
+ "R": 0x0052,
+ "S": 0x0053,
+ "T": 0x0054,
+ "U": 0x0055,
+ "V": 0x0056,
+ "W": 0x0057,
+ "X": 0x0058,
+ "Y": 0x0059,
+ "Z": 0x005A,
+ "bracketleft": 0x005B,
+ "backslash": 0x005C,
+ "bracketright": 0x005D,
+ "asciicircum": 0x005E,
+ "underscore": 0x005F,
+ "grave": 0x0060,
+ "a": 0x0061,
+ "b": 0x0062,
+ "c": 0x0063,
+ "d": 0x0064,
+ "e": 0x0065,
+ "f": 0x0066,
+ "g": 0x0067,
+ "h": 0x0068,
+ "i": 0x0069,
+ "j": 0x006A,
+ "k": 0x006B,
+ "l": 0x006C,
+ "m": 0x006D,
+ "n": 0x006E,
+ "o": 0x006F,
+ "p": 0x0070,
+ "q": 0x0071,
+ "r": 0x0072,
+ "s": 0x0073,
+ "t": 0x0074,
+ "u": 0x0075,
+ "v": 0x0076,
+ "w": 0x0077,
+ "x": 0x0078,
+ "y": 0x0079,
+ "z": 0x007A,
+ "braceleft": 0x007B,
+ "bar": 0x007C,
+ "verticalbar": 0x007C,
+ "braceright": 0x007D,
+ "asciitilde": 0x007E,
+ "controlDEL": 0x007F,
+ "Amacron": 0x0100,
+ "amacron": 0x0101,
+ "Abreve": 0x0102,
+ "abreve": 0x0103,
+ "Aogonek": 0x0104,
+ "aogonek": 0x0105,
+ "Cacute": 0x0106,
+ "cacute": 0x0107,
+ "Ccircumflex": 0x0108,
+ "ccircumflex": 0x0109,
+ "Cdot": 0x010A,
+ "Cdotaccent": 0x010A,
+ "cdot": 0x010B,
+ "cdotaccent": 0x010B,
+ "Ccaron": 0x010C,
+ "ccaron": 0x010D,
+ "Dcaron": 0x010E,
+ "dcaron": 0x010F,
+ "Dcroat": 0x0110,
+ "Dslash": 0x0110,
+ "dcroat": 0x0111,
+ "dmacron": 0x0111,
+ "Emacron": 0x0112,
+ "emacron": 0x0113,
+ "Ebreve": 0x0114,
+ "ebreve": 0x0115,
+ "Edot": 0x0116,
+ "Edotaccent": 0x0116,
+ "edot": 0x0117,
+ "edotaccent": 0x0117,
+ "Eogonek": 0x0118,
+ "eogonek": 0x0119,
+ "Ecaron": 0x011A,
+ "ecaron": 0x011B,
+ "Gcircumflex": 0x011C,
+ "gcircumflex": 0x011D,
+ "Gbreve": 0x011E,
+ "gbreve": 0x011F,
+ "Gdot": 0x0120,
+ "Gdotaccent": 0x0120,
+ "gdot": 0x0121,
+ "gdotaccent": 0x0121,
+ "Gcedilla": 0x0122,
+ "Gcommaaccent": 0x0122,
+ "gcedilla": 0x0123,
+ "gcommaaccent": 0x0123,
+ "Hcircumflex": 0x0124,
+ "hcircumflex": 0x0125,
+ "Hbar": 0x0126,
+ "hbar": 0x0127,
+ "Itilde": 0x0128,
+ "itilde": 0x0129,
+ "Imacron": 0x012A,
+ "imacron": 0x012B,
+ "Ibreve": 0x012C,
+ "ibreve": 0x012D,
+ "Iogonek": 0x012E,
+ "iogonek": 0x012F,
+ "Idot": 0x0130,
+ "Idotaccent": 0x0130,
+ "dotlessi": 0x0131,
+ "IJ": 0x0132,
+ "ij": 0x0133,
+ "Jcircumflex": 0x0134,
+ "jcircumflex": 0x0135,
+ "Kcedilla": 0x0136,
+ "Kcommaaccent": 0x0136,
+ "kcedilla": 0x0137,
+ "kcommaaccent": 0x0137,
+ "kgreenlandic": 0x0138,
+ "Lacute": 0x0139,
+ "lacute": 0x013A,
+ "Lcedilla": 0x013B,
+ "Lcommaaccent": 0x013B,
+ "lcedilla": 0x013C,
+ "lcommaaccent": 0x013C,
+ "Lcaron": 0x013D,
+ "lcaron": 0x013E,
+ "Ldot": 0x013F,
+ "Ldotaccent": 0x013F,
+ "ldot": 0x0140,
+ "ldotaccent": 0x0140,
+ "Lslash": 0x0141,
+ "lslash": 0x0142,
+ "Nacute": 0x0143,
+ "nacute": 0x0144,
+ "Ncedilla": 0x0145,
+ "Ncommaaccent": 0x0145,
+ "ncedilla": 0x0146,
+ "ncommaaccent": 0x0146,
+ "Ncaron": 0x0147,
+ "ncaron": 0x0148,
+ "napostrophe": 0x0149,
+ "quoterightn": 0x0149,
+ "Eng": 0x014A,
+ "eng": 0x014B,
+ "Omacron": 0x014C,
+ "omacron": 0x014D,
+ "Obreve": 0x014E,
+ "obreve": 0x014F,
+ "Odblacute": 0x0150,
+ "Ohungarumlaut": 0x0150,
+ "odblacute": 0x0151,
+ "ohungarumlaut": 0x0151,
+ "Racute": 0x0154,
+ "racute": 0x0155,
+ "Rcedilla": 0x0156,
+ "Rcommaaccent": 0x0156,
+ "rcedilla": 0x0157,
+ "rcommaaccent": 0x0157,
+ "Rcaron": 0x0158,
+ "rcaron": 0x0159,
+ "Sacute": 0x015A,
+ "sacute": 0x015B,
+ "Scircumflex": 0x015C,
+ "scircumflex": 0x015D,
+ "Scedilla": 0x015E,
+ "scedilla": 0x015F,
+ "Tcedilla": 0x0162,
+ "Tcommaaccent": 0x0162,
+ "tcedilla": 0x0163,
+ "tcommaaccent": 0x0163,
+ "Tcaron": 0x0164,
+ "tcaron": 0x0165,
+ "Tbar": 0x0166,
+ "tbar": 0x0167,
+ "Utilde": 0x0168,
+ "utilde": 0x0169,
+ "Umacron": 0x016A,
+ "umacron": 0x016B,
+ "Ubreve": 0x016C,
+ "ubreve": 0x016D,
+ "Uring": 0x016E,
+ "uring": 0x016F,
+ "Udblacute": 0x0170,
+ "Uhungarumlaut": 0x0170,
+ "udblacute": 0x0171,
+ "uhungarumlaut": 0x0171,
+ "Uogonek": 0x0172,
+ "uogonek": 0x0173,
+ "Wcircumflex": 0x0174,
+ "wcircumflex": 0x0175,
+ "Ycircumflex": 0x0176,
+ "ycircumflex": 0x0177,
+ "Zacute": 0x0179,
+ "zacute": 0x017A,
+ "Zdot": 0x017B,
+ "Zdotaccent": 0x017B,
+ "zdot": 0x017C,
+ "zdotaccent": 0x017C,
+ "Zcaron": 0x017D,
+ "zcaron": 0x017E,
+ "longs": 0x017F,
+ "slong": 0x017F,
+ "bstroke": 0x0180,
+ "Bhook": 0x0181,
+ "Btopbar": 0x0182,
+ "btopbar": 0x0183,
+ "Tonesix": 0x0184,
+ "tonesix": 0x0185,
+ "Oopen": 0x0186,
+ "Chook": 0x0187,
+ "chook": 0x0188,
+ "Dafrican": 0x0189,
+ "Dhook": 0x018A,
+ "Dtopbar": 0x018B,
+ "dtopbar": 0x018C,
+ "deltaturned": 0x018D,
+ "Ereversed": 0x018E,
+ "Schwa": 0x018F,
+ "Eopen": 0x0190,
+ "Fhook": 0x0191,
+ "Ghook": 0x0193,
+ "Gammaafrican": 0x0194,
+ "hv": 0x0195,
+ "Iotaafrican": 0x0196,
+ "Istroke": 0x0197,
+ "Khook": 0x0198,
+ "khook": 0x0199,
+ "lbar": 0x019A,
+ "lambdastroke": 0x019B,
+ "Mturned": 0x019C,
+ "Nhookleft": 0x019D,
+ "nlegrightlong": 0x019E,
+ "Ocenteredtilde": 0x019F,
+ "Ohorn": 0x01A0,
+ "ohorn": 0x01A1,
+ "Oi": 0x01A2,
+ "oi": 0x01A3,
+ "Phook": 0x01A4,
+ "phook": 0x01A5,
+ "yr": 0x01A6,
+ "Tonetwo": 0x01A7,
+ "tonetwo": 0x01A8,
+ "Esh": 0x01A9,
+ "eshreversedloop": 0x01AA,
+ "tpalatalhook": 0x01AB,
+ "Thook": 0x01AC,
+ "thook": 0x01AD,
+ "Tretroflexhook": 0x01AE,
+ "Uhorn": 0x01AF,
+ "uhorn": 0x01B0,
+ "Upsilonafrican": 0x01B1,
+ "Vhook": 0x01B2,
+ "Yhook": 0x01B3,
+ "yhook": 0x01B4,
+ "Zstroke": 0x01B5,
+ "zstroke": 0x01B6,
+ "Ezh": 0x01B7,
+ "Ezhreversed": 0x01B8,
+ "ezhreversed": 0x01B9,
+ "ezhtail": 0x01BA,
+ "twostroke": 0x01BB,
+ "Tonefive": 0x01BC,
+ "tonefive": 0x01BD,
+ "glottalinvertedstroke": 0x01BE,
+ "wynn": 0x01BF,
+ "clickdental": 0x01C0,
+ "clicklateral": 0x01C1,
+ "clickalveolar": 0x01C2,
+ "clickretroflex": 0x01C3,
+ "DZcaron": 0x01C4,
+ "Dzcaron": 0x01C5,
+ "dzcaron": 0x01C6,
+ "LJ": 0x01C7,
+ "Lj": 0x01C8,
+ "lj": 0x01C9,
+ "NJ": 0x01CA,
+ "Nj": 0x01CB,
+ "nj": 0x01CC,
+ "Acaron": 0x01CD,
+ "acaron": 0x01CE,
+ "Icaron": 0x01CF,
+ "icaron": 0x01D0,
+ "Ocaron": 0x01D1,
+ "ocaron": 0x01D2,
+ "Ucaron": 0x01D3,
+ "ucaron": 0x01D4,
+ "Udieresismacron": 0x01D5,
+ "udieresismacron": 0x01D6,
+ "Udieresisacute": 0x01D7,
+ "udieresisacute": 0x01D8,
+ "Udieresiscaron": 0x01D9,
+ "udieresiscaron": 0x01DA,
+ "Udieresisgrave": 0x01DB,
+ "udieresisgrave": 0x01DC,
+ "eturned": 0x01DD,
+ "Adieresismacron": 0x01DE,
+ "adieresismacron": 0x01DF,
+ "Adotmacron": 0x01E0,
+ "adotmacron": 0x01E1,
+ "AEmacron": 0x01E2,
+ "aemacron": 0x01E3,
+ "Gstroke": 0x01E4,
+ "gstroke": 0x01E5,
+ "Gcaron": 0x01E6,
+ "gcaron": 0x01E7,
+ "Kcaron": 0x01E8,
+ "kcaron": 0x01E9,
+ "Oogonek": 0x01EA,
+ "oogonek": 0x01EB,
+ "Oogonekmacron": 0x01EC,
+ "oogonekmacron": 0x01ED,
+ "Ezhcaron": 0x01EE,
+ "ezhcaron": 0x01EF,
+ "jcaron": 0x01F0,
+ "DZ": 0x01F1,
+ "Dz": 0x01F2,
+ "dz": 0x01F3,
+ "Gacute": 0x01F4,
+ "gacute": 0x01F5,
+ "Aringacute": 0x01FA,
+ "aringacute": 0x01FB,
+ "AEacute": 0x01FC,
+ "aeacute": 0x01FD,
+ "Oslashacute": 0x01FE,
+ "Ostrokeacute": 0x01FE,
+ "oslashacute": 0x01FF,
+ "ostrokeacute": 0x01FF,
+ "Adblgrave": 0x0200,
+ "adblgrave": 0x0201,
+ "Ainvertedbreve": 0x0202,
+ "ainvertedbreve": 0x0203,
+ "Edblgrave": 0x0204,
+ "edblgrave": 0x0205,
+ "Einvertedbreve": 0x0206,
+ "einvertedbreve": 0x0207,
+ "Idblgrave": 0x0208,
+ "idblgrave": 0x0209,
+ "Iinvertedbreve": 0x020A,
+ "iinvertedbreve": 0x020B,
+ "Odblgrave": 0x020C,
+ "odblgrave": 0x020D,
+ "Oinvertedbreve": 0x020E,
+ "oinvertedbreve": 0x020F,
+ "Rdblgrave": 0x0210,
+ "rdblgrave": 0x0211,
+ "Rinvertedbreve": 0x0212,
+ "rinvertedbreve": 0x0213,
+ "Udblgrave": 0x0214,
+ "udblgrave": 0x0215,
+ "Uinvertedbreve": 0x0216,
+ "uinvertedbreve": 0x0217,
+ "Scommaaccent": 0x0218,
+ "scommaaccent": 0x0219,
+ "aturned": 0x0250,
+ "ascript": 0x0251,
+ "ascriptturned": 0x0252,
+ "bhook": 0x0253,
+ "oopen": 0x0254,
+ "ccurl": 0x0255,
+ "dtail": 0x0256,
+ "dhook": 0x0257,
+ "ereversed": 0x0258,
+ "schwa": 0x0259,
+ "schwahook": 0x025A,
+ "eopen": 0x025B,
+ "eopenreversed": 0x025C,
+ "eopenreversedhook": 0x025D,
+ "eopenreversedclosed": 0x025E,
+ "jdotlessstroke": 0x025F,
+ "ghook": 0x0260,
+ "gscript": 0x0261,
+ "gammalatinsmall": 0x0263,
+ "ramshorn": 0x0264,
+ "hturned": 0x0265,
+ "hhook": 0x0266,
+ "henghook": 0x0267,
+ "istroke": 0x0268,
+ "iotalatin": 0x0269,
+ "lmiddletilde": 0x026B,
+ "lbelt": 0x026C,
+ "lhookretroflex": 0x026D,
+ "lezh": 0x026E,
+ "mturned": 0x026F,
+ "mlonglegturned": 0x0270,
+ "mhook": 0x0271,
+ "nhookleft": 0x0272,
+ "nhookretroflex": 0x0273,
+ "obarred": 0x0275,
+ "omegalatinclosed": 0x0277,
+ "philatin": 0x0278,
+ "rturned": 0x0279,
+ "rlonglegturned": 0x027A,
+ "rhookturned": 0x027B,
+ "rlongleg": 0x027C,
+ "rhook": 0x027D,
+ "rfishhook": 0x027E,
+ "rfishhookreversed": 0x027F,
+ "Rsmallinverted": 0x0281,
+ "shook": 0x0282,
+ "esh": 0x0283,
+ "dotlessjstrokehook": 0x0284,
+ "eshsquatreversed": 0x0285,
+ "eshcurl": 0x0286,
+ "tturned": 0x0287,
+ "tretroflexhook": 0x0288,
+ "ubar": 0x0289,
+ "upsilonlatin": 0x028A,
+ "vhook": 0x028B,
+ "vturned": 0x028C,
+ "wturned": 0x028D,
+ "yturned": 0x028E,
+ "zretroflexhook": 0x0290,
+ "zcurl": 0x0291,
+ "ezh": 0x0292,
+ "ezhcurl": 0x0293,
+ "glottalstop": 0x0294,
+ "glottalstopreversed": 0x0295,
+ "glottalstopinverted": 0x0296,
+ "cstretched": 0x0297,
+ "bilabialclick": 0x0298,
+ "eopenclosed": 0x029A,
+ "Gsmallhook": 0x029B,
+ "jcrossedtail": 0x029D,
+ "kturned": 0x029E,
+ "qhook": 0x02A0,
+ "glottalstopstroke": 0x02A1,
+ "glottalstopstrokereversed": 0x02A2,
+ "dzaltone": 0x02A3,
+ "dezh": 0x02A4,
+ "dzcurl": 0x02A5,
+ "ts": 0x02A6,
+ "tesh": 0x02A7,
+ "tccurl": 0x02A8,
+ "hsuperior": 0x02B0,
+ "hhooksuperior": 0x02B1,
+ "jsuperior": 0x02B2,
+ "rturnedsuperior": 0x02B4,
+ "rhookturnedsuperior": 0x02B5,
+ "Rsmallinvertedsuperior": 0x02B6,
+ "wsuperior": 0x02B7,
+ "ysuperior": 0x02B8,
+ "primemod": 0x02B9,
+ "dblprimemod": 0x02BA,
+ "commaturnedmod": 0x02BB,
+ "afii57929": 0x02BC,
+ "apostrophemod": 0x02BC,
+ "afii64937": 0x02BD,
+ "commareversedmod": 0x02BD,
+ "ringhalfright": 0x02BE,
+ "ringhalfleft": 0x02BF,
+ "glottalstopmod": 0x02C0,
+ "glottalstopreversedmod": 0x02C1,
+ "arrowheadleftmod": 0x02C2,
+ "arrowheadrightmod": 0x02C3,
+ "arrowheadupmod": 0x02C4,
+ "arrowheaddownmod": 0x02C5,
+ "caron": 0x02C7,
+ "verticallinemod": 0x02C8,
+ "firsttonechinese": 0x02C9,
+ "secondtonechinese": 0x02CA,
+ "fourthtonechinese": 0x02CB,
+ "verticallinelowmod": 0x02CC,
+ "macronlowmod": 0x02CD,
+ "gravelowmod": 0x02CE,
+ "acutelowmod": 0x02CF,
+ "colontriangularmod": 0x02D0,
+ "colontriangularhalfmod": 0x02D1,
+ "ringhalfrightcentered": 0x02D2,
+ "ringhalfleftcentered": 0x02D3,
+ "uptackmod": 0x02D4,
+ "downtackmod": 0x02D5,
+ "plusmod": 0x02D6,
+ "minusmod": 0x02D7,
+ "breve": 0x02D8,
+ "dotaccent": 0x02D9,
+ "ring": 0x02DA,
+ "ogonek": 0x02DB,
+ "hungarumlaut": 0x02DD,
+ "rhotichookmod": 0x02DE,
+ "gammasuperior": 0x02E0,
+ "xsuperior": 0x02E3,
+ "glottalstopreversedsuperior": 0x02E4,
+ "tonebarextrahighmod": 0x02E5,
+ "tonebarhighmod": 0x02E6,
+ "tonebarmidmod": 0x02E7,
+ "tonebarlowmod": 0x02E8,
+ "tonebarextralowmod": 0x02E9,
+ "gravecmb": 0x0300,
+ "gravecomb": 0x0300,
+ "acutecmb": 0x0301,
+ "acutecomb": 0x0301,
+ "circumflexcmb": 0x0302,
+ "tildecmb": 0x0303,
+ "tildecomb": 0x0303,
+ "macroncmb": 0x0304,
+ "overlinecmb": 0x0305,
+ "brevecmb": 0x0306,
+ "dotaccentcmb": 0x0307,
+ "dieresiscmb": 0x0308,
+ "hookabovecomb": 0x0309,
+ "hookcmb": 0x0309,
+ "ringcmb": 0x030A,
+ "hungarumlautcmb": 0x030B,
+ "caroncmb": 0x030C,
+ "verticallineabovecmb": 0x030D,
+ "dblverticallineabovecmb": 0x030E,
+ "dblgravecmb": 0x030F,
+ "candrabinducmb": 0x0310,
+ "breveinvertedcmb": 0x0311,
+ "commaturnedabovecmb": 0x0312,
+ "commaabovecmb": 0x0313,
+ "commareversedabovecmb": 0x0314,
+ "commaaboverightcmb": 0x0315,
+ "gravebelowcmb": 0x0316,
+ "acutebelowcmb": 0x0317,
+ "lefttackbelowcmb": 0x0318,
+ "righttackbelowcmb": 0x0319,
+ "leftangleabovecmb": 0x031A,
+ "horncmb": 0x031B,
+ "ringhalfleftbelowcmb": 0x031C,
+ "uptackbelowcmb": 0x031D,
+ "downtackbelowcmb": 0x031E,
+ "plusbelowcmb": 0x031F,
+ "minusbelowcmb": 0x0320,
+ "hookpalatalizedbelowcmb": 0x0321,
+ "hookretroflexbelowcmb": 0x0322,
+ "dotbelowcmb": 0x0323,
+ "dotbelowcomb": 0x0323,
+ "dieresisbelowcmb": 0x0324,
+ "ringbelowcmb": 0x0325,
+ "cedillacmb": 0x0327,
+ "ogonekcmb": 0x0328,
+ "verticallinebelowcmb": 0x0329,
+ "bridgebelowcmb": 0x032A,
+ "dblarchinvertedbelowcmb": 0x032B,
+ "caronbelowcmb": 0x032C,
+ "circumflexbelowcmb": 0x032D,
+ "brevebelowcmb": 0x032E,
+ "breveinvertedbelowcmb": 0x032F,
+ "tildebelowcmb": 0x0330,
+ "macronbelowcmb": 0x0331,
+ "lowlinecmb": 0x0332,
+ "dbllowlinecmb": 0x0333,
+ "tildeoverlaycmb": 0x0334,
+ "strokeshortoverlaycmb": 0x0335,
+ "strokelongoverlaycmb": 0x0336,
+ "solidusshortoverlaycmb": 0x0337,
+ "soliduslongoverlaycmb": 0x0338,
+ "ringhalfrightbelowcmb": 0x0339,
+ "bridgeinvertedbelowcmb": 0x033A,
+ "squarebelowcmb": 0x033B,
+ "seagullbelowcmb": 0x033C,
+ "xabovecmb": 0x033D,
+ "tildeverticalcmb": 0x033E,
+ "dbloverlinecmb": 0x033F,
+ "gravetonecmb": 0x0340,
+ "acutetonecmb": 0x0341,
+ "perispomenigreekcmb": 0x0342,
+ "koroniscmb": 0x0343,
+ "dialytikatonoscmb": 0x0344,
+ "ypogegrammenigreekcmb": 0x0345,
+ "tildedoublecmb": 0x0360,
+ "breveinverteddoublecmb": 0x0361,
+ "numeralsigngreek": 0x0374,
+ "numeralsignlowergreek": 0x0375,
+ "ypogegrammeni": 0x037A,
+ "questiongreek": 0x037E,
+ "tonos": 0x0384,
+ "dialytikatonos": 0x0385,
+ "dieresistonos": 0x0385,
+ "Alphatonos": 0x0386,
+ "anoteleia": 0x0387,
+ "Epsilontonos": 0x0388,
+ "Etatonos": 0x0389,
+ "Iotatonos": 0x038A,
+ "Omicrontonos": 0x038C,
+ "Upsilontonos": 0x038E,
+ "Omegatonos": 0x038F,
+ "iotadieresistonos": 0x0390,
+ "Iotadieresis": 0x03AA,
+ "Upsilondieresis": 0x03AB,
+ "alphatonos": 0x03AC,
+ "epsilontonos": 0x03AD,
+ "etatonos": 0x03AE,
+ "iotatonos": 0x03AF,
+ "upsilondieresistonos": 0x03B0,
+ "iotadieresis": 0x03CA,
+ "upsilondieresis": 0x03CB,
+ "omicrontonos": 0x03CC,
+ "upsilontonos": 0x03CD,
+ "omegatonos": 0x03CE,
+ "betasymbolgreek": 0x03D0,
+ "Upsilonacutehooksymbolgreek": 0x03D3,
+ "Upsilondieresishooksymbolgreek": 0x03D4,
+ "phi1": 0x03D5,
+ "phisymbolgreek": 0x03D5,
+ "Stigmagreek": 0x03DA,
+ "Digammagreek": 0x03DC,
+ "Koppagreek": 0x03DE,
+ "Sampigreek": 0x03E0,
+ "Sheicoptic": 0x03E2,
+ "sheicoptic": 0x03E3,
+ "Feicoptic": 0x03E4,
+ "feicoptic": 0x03E5,
+ "Kheicoptic": 0x03E6,
+ "kheicoptic": 0x03E7,
+ "Horicoptic": 0x03E8,
+ "horicoptic": 0x03E9,
+ "Gangiacoptic": 0x03EA,
+ "gangiacoptic": 0x03EB,
+ "Shimacoptic": 0x03EC,
+ "shimacoptic": 0x03ED,
+ "Deicoptic": 0x03EE,
+ "deicoptic": 0x03EF,
+ "kappasymbolgreek": 0x03F0,
+ "rhosymbolgreek": 0x03F1,
+ "sigmalunatesymbolgreek": 0x03F2,
+ "yotgreek": 0x03F3,
+ "Iocyrillic": 0x0401,
+ "afii10023": 0x0401,
+ "Djecyrillic": 0x0402,
+ "afii10051": 0x0402,
+ "Gjecyrillic": 0x0403,
+ "afii10052": 0x0403,
+ "Ecyrillic": 0x0404,
+ "afii10053": 0x0404,
+ "Dzecyrillic": 0x0405,
+ "afii10054": 0x0405,
+ "Icyrillic": 0x0406,
+ "afii10055": 0x0406,
+ "Yicyrillic": 0x0407,
+ "afii10056": 0x0407,
+ "Jecyrillic": 0x0408,
+ "afii10057": 0x0408,
+ "Ljecyrillic": 0x0409,
+ "afii10058": 0x0409,
+ "Njecyrillic": 0x040A,
+ "afii10059": 0x040A,
+ "Tshecyrillic": 0x040B,
+ "afii10060": 0x040B,
+ "Kjecyrillic": 0x040C,
+ "afii10061": 0x040C,
+ "Ushortcyrillic": 0x040E,
+ "afii10062": 0x040E,
+ "Dzhecyrillic": 0x040F,
+ "afii10145": 0x040F,
+ "Acyrillic": 0x0410,
+ "afii10017": 0x0410,
+ "Becyrillic": 0x0411,
+ "afii10018": 0x0411,
+ "Vecyrillic": 0x0412,
+ "afii10019": 0x0412,
+ "Gecyrillic": 0x0413,
+ "afii10020": 0x0413,
+ "Decyrillic": 0x0414,
+ "afii10021": 0x0414,
+ "Iecyrillic": 0x0415,
+ "afii10022": 0x0415,
+ "Zhecyrillic": 0x0416,
+ "afii10024": 0x0416,
+ "Zecyrillic": 0x0417,
+ "afii10025": 0x0417,
+ "Iicyrillic": 0x0418,
+ "afii10026": 0x0418,
+ "Iishortcyrillic": 0x0419,
+ "afii10027": 0x0419,
+ "Kacyrillic": 0x041A,
+ "afii10028": 0x041A,
+ "Elcyrillic": 0x041B,
+ "afii10029": 0x041B,
+ "Emcyrillic": 0x041C,
+ "afii10030": 0x041C,
+ "Encyrillic": 0x041D,
+ "afii10031": 0x041D,
+ "Ocyrillic": 0x041E,
+ "afii10032": 0x041E,
+ "Pecyrillic": 0x041F,
+ "afii10033": 0x041F,
+ "Ercyrillic": 0x0420,
+ "afii10034": 0x0420,
+ "Escyrillic": 0x0421,
+ "afii10035": 0x0421,
+ "Tecyrillic": 0x0422,
+ "afii10036": 0x0422,
+ "Ucyrillic": 0x0423,
+ "afii10037": 0x0423,
+ "Efcyrillic": 0x0424,
+ "afii10038": 0x0424,
+ "Khacyrillic": 0x0425,
+ "afii10039": 0x0425,
+ "Tsecyrillic": 0x0426,
+ "afii10040": 0x0426,
+ "Checyrillic": 0x0427,
+ "afii10041": 0x0427,
+ "Shacyrillic": 0x0428,
+ "afii10042": 0x0428,
+ "Shchacyrillic": 0x0429,
+ "afii10043": 0x0429,
+ "Hardsigncyrillic": 0x042A,
+ "afii10044": 0x042A,
+ "Yericyrillic": 0x042B,
+ "afii10045": 0x042B,
+ "Softsigncyrillic": 0x042C,
+ "afii10046": 0x042C,
+ "Ereversedcyrillic": 0x042D,
+ "afii10047": 0x042D,
+ "IUcyrillic": 0x042E,
+ "afii10048": 0x042E,
+ "IAcyrillic": 0x042F,
+ "afii10049": 0x042F,
+ "acyrillic": 0x0430,
+ "afii10065": 0x0430,
+ "afii10066": 0x0431,
+ "becyrillic": 0x0431,
+ "afii10067": 0x0432,
+ "vecyrillic": 0x0432,
+ "afii10068": 0x0433,
+ "gecyrillic": 0x0433,
+ "afii10069": 0x0434,
+ "decyrillic": 0x0434,
+ "afii10070": 0x0435,
+ "iecyrillic": 0x0435,
+ "afii10072": 0x0436,
+ "zhecyrillic": 0x0436,
+ "afii10073": 0x0437,
+ "zecyrillic": 0x0437,
+ "afii10074": 0x0438,
+ "iicyrillic": 0x0438,
+ "afii10075": 0x0439,
+ "iishortcyrillic": 0x0439,
+ "afii10076": 0x043A,
+ "kacyrillic": 0x043A,
+ "afii10077": 0x043B,
+ "elcyrillic": 0x043B,
+ "afii10078": 0x043C,
+ "emcyrillic": 0x043C,
+ "afii10079": 0x043D,
+ "encyrillic": 0x043D,
+ "afii10080": 0x043E,
+ "ocyrillic": 0x043E,
+ "afii10081": 0x043F,
+ "pecyrillic": 0x043F,
+ "afii10082": 0x0440,
+ "ercyrillic": 0x0440,
+ "afii10083": 0x0441,
+ "escyrillic": 0x0441,
+ "afii10084": 0x0442,
+ "tecyrillic": 0x0442,
+ "afii10085": 0x0443,
+ "ucyrillic": 0x0443,
+ "afii10086": 0x0444,
+ "efcyrillic": 0x0444,
+ "afii10087": 0x0445,
+ "khacyrillic": 0x0445,
+ "afii10088": 0x0446,
+ "tsecyrillic": 0x0446,
+ "afii10089": 0x0447,
+ "checyrillic": 0x0447,
+ "afii10090": 0x0448,
+ "shacyrillic": 0x0448,
+ "afii10091": 0x0449,
+ "shchacyrillic": 0x0449,
+ "afii10092": 0x044A,
+ "hardsigncyrillic": 0x044A,
+ "afii10093": 0x044B,
+ "yericyrillic": 0x044B,
+ "afii10094": 0x044C,
+ "softsigncyrillic": 0x044C,
+ "afii10095": 0x044D,
+ "ereversedcyrillic": 0x044D,
+ "afii10096": 0x044E,
+ "iucyrillic": 0x044E,
+ "afii10097": 0x044F,
+ "iacyrillic": 0x044F,
+ "afii10071": 0x0451,
+ "iocyrillic": 0x0451,
+ "afii10099": 0x0452,
+ "djecyrillic": 0x0452,
+ "afii10100": 0x0453,
+ "gjecyrillic": 0x0453,
+ "afii10101": 0x0454,
+ "ecyrillic": 0x0454,
+ "afii10102": 0x0455,
+ "dzecyrillic": 0x0455,
+ "afii10103": 0x0456,
+ "icyrillic": 0x0456,
+ "afii10104": 0x0457,
+ "yicyrillic": 0x0457,
+ "afii10105": 0x0458,
+ "jecyrillic": 0x0458,
+ "afii10106": 0x0459,
+ "ljecyrillic": 0x0459,
+ "afii10107": 0x045A,
+ "njecyrillic": 0x045A,
+ "afii10108": 0x045B,
+ "tshecyrillic": 0x045B,
+ "afii10109": 0x045C,
+ "kjecyrillic": 0x045C,
+ "afii10110": 0x045E,
+ "ushortcyrillic": 0x045E,
+ "afii10193": 0x045F,
+ "dzhecyrillic": 0x045F,
+ "Omegacyrillic": 0x0460,
+ "omegacyrillic": 0x0461,
+ "Yatcyrillic": 0x0462,
+ "afii10146": 0x0462,
+ "afii10194": 0x0463,
+ "yatcyrillic": 0x0463,
+ "Eiotifiedcyrillic": 0x0464,
+ "eiotifiedcyrillic": 0x0465,
+ "Yuslittlecyrillic": 0x0466,
+ "yuslittlecyrillic": 0x0467,
+ "Yuslittleiotifiedcyrillic": 0x0468,
+ "yuslittleiotifiedcyrillic": 0x0469,
+ "Yusbigcyrillic": 0x046A,
+ "yusbigcyrillic": 0x046B,
+ "Yusbigiotifiedcyrillic": 0x046C,
+ "yusbigiotifiedcyrillic": 0x046D,
+ "Ksicyrillic": 0x046E,
+ "ksicyrillic": 0x046F,
+ "Psicyrillic": 0x0470,
+ "psicyrillic": 0x0471,
+ "Fitacyrillic": 0x0472,
+ "afii10147": 0x0472,
+ "afii10195": 0x0473,
+ "fitacyrillic": 0x0473,
+ "Izhitsacyrillic": 0x0474,
+ "afii10148": 0x0474,
+ "afii10196": 0x0475,
+ "izhitsacyrillic": 0x0475,
+ "Izhitsadblgravecyrillic": 0x0476,
+ "izhitsadblgravecyrillic": 0x0477,
+ "Ukcyrillic": 0x0478,
+ "ukcyrillic": 0x0479,
+ "Omegaroundcyrillic": 0x047A,
+ "omegaroundcyrillic": 0x047B,
+ "Omegatitlocyrillic": 0x047C,
+ "omegatitlocyrillic": 0x047D,
+ "Otcyrillic": 0x047E,
+ "otcyrillic": 0x047F,
+ "Koppacyrillic": 0x0480,
+ "koppacyrillic": 0x0481,
+ "thousandcyrillic": 0x0482,
+ "titlocyrilliccmb": 0x0483,
+ "palatalizationcyrilliccmb": 0x0484,
+ "dasiapneumatacyrilliccmb": 0x0485,
+ "psilipneumatacyrilliccmb": 0x0486,
+ "Gheupturncyrillic": 0x0490,
+ "afii10050": 0x0490,
+ "afii10098": 0x0491,
+ "gheupturncyrillic": 0x0491,
+ "Ghestrokecyrillic": 0x0492,
+ "ghestrokecyrillic": 0x0493,
+ "Ghemiddlehookcyrillic": 0x0494,
+ "ghemiddlehookcyrillic": 0x0495,
+ "Zhedescendercyrillic": 0x0496,
+ "zhedescendercyrillic": 0x0497,
+ "Zedescendercyrillic": 0x0498,
+ "zedescendercyrillic": 0x0499,
+ "Kadescendercyrillic": 0x049A,
+ "kadescendercyrillic": 0x049B,
+ "Kaverticalstrokecyrillic": 0x049C,
+ "kaverticalstrokecyrillic": 0x049D,
+ "Kastrokecyrillic": 0x049E,
+ "kastrokecyrillic": 0x049F,
+ "Kabashkircyrillic": 0x04A0,
+ "kabashkircyrillic": 0x04A1,
+ "Endescendercyrillic": 0x04A2,
+ "endescendercyrillic": 0x04A3,
+ "Enghecyrillic": 0x04A4,
+ "enghecyrillic": 0x04A5,
+ "Pemiddlehookcyrillic": 0x04A6,
+ "pemiddlehookcyrillic": 0x04A7,
+ "Haabkhasiancyrillic": 0x04A8,
+ "haabkhasiancyrillic": 0x04A9,
+ "Esdescendercyrillic": 0x04AA,
+ "esdescendercyrillic": 0x04AB,
+ "Tedescendercyrillic": 0x04AC,
+ "tedescendercyrillic": 0x04AD,
+ "Ustraightcyrillic": 0x04AE,
+ "ustraightcyrillic": 0x04AF,
+ "Ustraightstrokecyrillic": 0x04B0,
+ "ustraightstrokecyrillic": 0x04B1,
+ "Hadescendercyrillic": 0x04B2,
+ "hadescendercyrillic": 0x04B3,
+ "Tetsecyrillic": 0x04B4,
+ "tetsecyrillic": 0x04B5,
+ "Chedescendercyrillic": 0x04B6,
+ "chedescendercyrillic": 0x04B7,
+ "Cheverticalstrokecyrillic": 0x04B8,
+ "cheverticalstrokecyrillic": 0x04B9,
+ "Shhacyrillic": 0x04BA,
+ "shhacyrillic": 0x04BB,
+ "Cheabkhasiancyrillic": 0x04BC,
+ "cheabkhasiancyrillic": 0x04BD,
+ "Chedescenderabkhasiancyrillic": 0x04BE,
+ "chedescenderabkhasiancyrillic": 0x04BF,
+ "palochkacyrillic": 0x04C0,
+ "Zhebrevecyrillic": 0x04C1,
+ "zhebrevecyrillic": 0x04C2,
+ "Kahookcyrillic": 0x04C3,
+ "kahookcyrillic": 0x04C4,
+ "Enhookcyrillic": 0x04C7,
+ "enhookcyrillic": 0x04C8,
+ "Chekhakassiancyrillic": 0x04CB,
+ "chekhakassiancyrillic": 0x04CC,
+ "Abrevecyrillic": 0x04D0,
+ "abrevecyrillic": 0x04D1,
+ "Adieresiscyrillic": 0x04D2,
+ "adieresiscyrillic": 0x04D3,
+ "Aiecyrillic": 0x04D4,
+ "aiecyrillic": 0x04D5,
+ "Iebrevecyrillic": 0x04D6,
+ "iebrevecyrillic": 0x04D7,
+ "Schwacyrillic": 0x04D8,
+ "afii10846": 0x04D9,
+ "schwacyrillic": 0x04D9,
+ "Schwadieresiscyrillic": 0x04DA,
+ "schwadieresiscyrillic": 0x04DB,
+ "Zhedieresiscyrillic": 0x04DC,
+ "zhedieresiscyrillic": 0x04DD,
+ "Zedieresiscyrillic": 0x04DE,
+ "zedieresiscyrillic": 0x04DF,
+ "Dzeabkhasiancyrillic": 0x04E0,
+ "dzeabkhasiancyrillic": 0x04E1,
+ "Imacroncyrillic": 0x04E2,
+ "imacroncyrillic": 0x04E3,
+ "Idieresiscyrillic": 0x04E4,
+ "idieresiscyrillic": 0x04E5,
+ "Odieresiscyrillic": 0x04E6,
+ "odieresiscyrillic": 0x04E7,
+ "Obarredcyrillic": 0x04E8,
+ "obarredcyrillic": 0x04E9,
+ "Obarreddieresiscyrillic": 0x04EA,
+ "obarreddieresiscyrillic": 0x04EB,
+ "Umacroncyrillic": 0x04EE,
+ "umacroncyrillic": 0x04EF,
+ "Udieresiscyrillic": 0x04F0,
+ "udieresiscyrillic": 0x04F1,
+ "Uhungarumlautcyrillic": 0x04F2,
+ "uhungarumlautcyrillic": 0x04F3,
+ "Chedieresiscyrillic": 0x04F4,
+ "chedieresiscyrillic": 0x04F5,
+ "Yerudieresiscyrillic": 0x04F8,
+ "yerudieresiscyrillic": 0x04F9,
+ "Aybarmenian": 0x0531,
+ "Benarmenian": 0x0532,
+ "Gimarmenian": 0x0533,
+ "Daarmenian": 0x0534,
+ "Echarmenian": 0x0535,
+ "Zaarmenian": 0x0536,
+ "Eharmenian": 0x0537,
+ "Etarmenian": 0x0538,
+ "Toarmenian": 0x0539,
+ "Zhearmenian": 0x053A,
+ "Iniarmenian": 0x053B,
+ "Liwnarmenian": 0x053C,
+ "Xeharmenian": 0x053D,
+ "Caarmenian": 0x053E,
+ "Kenarmenian": 0x053F,
+ "Hoarmenian": 0x0540,
+ "Jaarmenian": 0x0541,
+ "Ghadarmenian": 0x0542,
+ "Cheharmenian": 0x0543,
+ "Menarmenian": 0x0544,
+ "Yiarmenian": 0x0545,
+ "Nowarmenian": 0x0546,
+ "Shaarmenian": 0x0547,
+ "Voarmenian": 0x0548,
+ "Chaarmenian": 0x0549,
+ "Peharmenian": 0x054A,
+ "Jheharmenian": 0x054B,
+ "Raarmenian": 0x054C,
+ "Seharmenian": 0x054D,
+ "Vewarmenian": 0x054E,
+ "Tiwnarmenian": 0x054F,
+ "Reharmenian": 0x0550,
+ "Coarmenian": 0x0551,
+ "Yiwnarmenian": 0x0552,
+ "Piwrarmenian": 0x0553,
+ "Keharmenian": 0x0554,
+ "Oharmenian": 0x0555,
+ "Feharmenian": 0x0556,
+ "ringhalfleftarmenian": 0x0559,
+ "apostrophearmenian": 0x055A,
+ "emphasismarkarmenian": 0x055B,
+ "exclamarmenian": 0x055C,
+ "commaarmenian": 0x055D,
+ "questionarmenian": 0x055E,
+ "abbreviationmarkarmenian": 0x055F,
+ "aybarmenian": 0x0561,
+ "benarmenian": 0x0562,
+ "gimarmenian": 0x0563,
+ "daarmenian": 0x0564,
+ "echarmenian": 0x0565,
+ "zaarmenian": 0x0566,
+ "eharmenian": 0x0567,
+ "etarmenian": 0x0568,
+ "toarmenian": 0x0569,
+ "zhearmenian": 0x056A,
+ "iniarmenian": 0x056B,
+ "liwnarmenian": 0x056C,
+ "xeharmenian": 0x056D,
+ "caarmenian": 0x056E,
+ "kenarmenian": 0x056F,
+ "hoarmenian": 0x0570,
+ "jaarmenian": 0x0571,
+ "ghadarmenian": 0x0572,
+ "cheharmenian": 0x0573,
+ "menarmenian": 0x0574,
+ "yiarmenian": 0x0575,
+ "nowarmenian": 0x0576,
+ "shaarmenian": 0x0577,
+ "voarmenian": 0x0578,
+ "chaarmenian": 0x0579,
+ "peharmenian": 0x057A,
+ "jheharmenian": 0x057B,
+ "raarmenian": 0x057C,
+ "seharmenian": 0x057D,
+ "vewarmenian": 0x057E,
+ "tiwnarmenian": 0x057F,
+ "reharmenian": 0x0580,
+ "coarmenian": 0x0581,
+ "yiwnarmenian": 0x0582,
+ "piwrarmenian": 0x0583,
+ "keharmenian": 0x0584,
+ "oharmenian": 0x0585,
+ "feharmenian": 0x0586,
+ "echyiwnarmenian": 0x0587,
+ "periodarmenian": 0x0589,
+ "etnahtafoukhhebrew": 0x0591,
+ "etnahtafoukhlefthebrew": 0x0591,
+ "etnahtahebrew": 0x0591,
+ "etnahtalefthebrew": 0x0591,
+ "segoltahebrew": 0x0592,
+ "shalshelethebrew": 0x0593,
+ "zaqefqatanhebrew": 0x0594,
+ "zaqefgadolhebrew": 0x0595,
+ "tipehahebrew": 0x0596,
+ "tipehalefthebrew": 0x0596,
+ "reviahebrew": 0x0597,
+ "reviamugrashhebrew": 0x0597,
+ "zarqahebrew": 0x0598,
+ "pashtahebrew": 0x0599,
+ "yetivhebrew": 0x059A,
+ "tevirhebrew": 0x059B,
+ "tevirlefthebrew": 0x059B,
+ "gereshaccenthebrew": 0x059C,
+ "gereshmuqdamhebrew": 0x059D,
+ "gershayimaccenthebrew": 0x059E,
+ "qarneyparahebrew": 0x059F,
+ "telishagedolahebrew": 0x05A0,
+ "pazerhebrew": 0x05A1,
+ "munahhebrew": 0x05A3,
+ "munahlefthebrew": 0x05A3,
+ "mahapakhhebrew": 0x05A4,
+ "mahapakhlefthebrew": 0x05A4,
+ "merkhahebrew": 0x05A5,
+ "merkhalefthebrew": 0x05A5,
+ "merkhakefulahebrew": 0x05A6,
+ "merkhakefulalefthebrew": 0x05A6,
+ "dargahebrew": 0x05A7,
+ "dargalefthebrew": 0x05A7,
+ "qadmahebrew": 0x05A8,
+ "telishaqetanahebrew": 0x05A9,
+ "yerahbenyomohebrew": 0x05AA,
+ "yerahbenyomolefthebrew": 0x05AA,
+ "olehebrew": 0x05AB,
+ "iluyhebrew": 0x05AC,
+ "dehihebrew": 0x05AD,
+ "zinorhebrew": 0x05AE,
+ "masoracirclehebrew": 0x05AF,
+ "afii57799": 0x05B0,
+ "sheva": 0x05B0,
+ "sheva115": 0x05B0,
+ "sheva15": 0x05B0,
+ "sheva22": 0x05B0,
+ "sheva2e": 0x05B0,
+ "shevahebrew": 0x05B0,
+ "shevanarrowhebrew": 0x05B0,
+ "shevaquarterhebrew": 0x05B0,
+ "shevawidehebrew": 0x05B0,
+ "afii57801": 0x05B1,
+ "hatafsegol": 0x05B1,
+ "hatafsegol17": 0x05B1,
+ "hatafsegol24": 0x05B1,
+ "hatafsegol30": 0x05B1,
+ "hatafsegolhebrew": 0x05B1,
+ "hatafsegolnarrowhebrew": 0x05B1,
+ "hatafsegolquarterhebrew": 0x05B1,
+ "hatafsegolwidehebrew": 0x05B1,
+ "afii57800": 0x05B2,
+ "hatafpatah": 0x05B2,
+ "hatafpatah16": 0x05B2,
+ "hatafpatah23": 0x05B2,
+ "hatafpatah2f": 0x05B2,
+ "hatafpatahhebrew": 0x05B2,
+ "hatafpatahnarrowhebrew": 0x05B2,
+ "hatafpatahquarterhebrew": 0x05B2,
+ "hatafpatahwidehebrew": 0x05B2,
+ "afii57802": 0x05B3,
+ "hatafqamats": 0x05B3,
+ "hatafqamats1b": 0x05B3,
+ "hatafqamats28": 0x05B3,
+ "hatafqamats34": 0x05B3,
+ "hatafqamatshebrew": 0x05B3,
+ "hatafqamatsnarrowhebrew": 0x05B3,
+ "hatafqamatsquarterhebrew": 0x05B3,
+ "hatafqamatswidehebrew": 0x05B3,
+ "afii57793": 0x05B4,
+ "hiriq": 0x05B4,
+ "hiriq14": 0x05B4,
+ "hiriq21": 0x05B4,
+ "hiriq2d": 0x05B4,
+ "hiriqhebrew": 0x05B4,
+ "hiriqnarrowhebrew": 0x05B4,
+ "hiriqquarterhebrew": 0x05B4,
+ "hiriqwidehebrew": 0x05B4,
+ "afii57794": 0x05B5,
+ "tsere": 0x05B5,
+ "tsere12": 0x05B5,
+ "tsere1e": 0x05B5,
+ "tsere2b": 0x05B5,
+ "tserehebrew": 0x05B5,
+ "tserenarrowhebrew": 0x05B5,
+ "tserequarterhebrew": 0x05B5,
+ "tserewidehebrew": 0x05B5,
+ "afii57795": 0x05B6,
+ "segol": 0x05B6,
+ "segol13": 0x05B6,
+ "segol1f": 0x05B6,
+ "segol2c": 0x05B6,
+ "segolhebrew": 0x05B6,
+ "segolnarrowhebrew": 0x05B6,
+ "segolquarterhebrew": 0x05B6,
+ "segolwidehebrew": 0x05B6,
+ "afii57798": 0x05B7,
+ "patah": 0x05B7,
+ "patah11": 0x05B7,
+ "patah1d": 0x05B7,
+ "patah2a": 0x05B7,
+ "patahhebrew": 0x05B7,
+ "patahnarrowhebrew": 0x05B7,
+ "patahquarterhebrew": 0x05B7,
+ "patahwidehebrew": 0x05B7,
+ "afii57797": 0x05B8,
+ "qamats": 0x05B8,
+ "qamats10": 0x05B8,
+ "qamats1a": 0x05B8,
+ "qamats1c": 0x05B8,
+ "qamats27": 0x05B8,
+ "qamats29": 0x05B8,
+ "qamats33": 0x05B8,
+ "qamatsde": 0x05B8,
+ "qamatshebrew": 0x05B8,
+ "qamatsnarrowhebrew": 0x05B8,
+ "qamatsqatanhebrew": 0x05B8,
+ "qamatsqatannarrowhebrew": 0x05B8,
+ "qamatsqatanquarterhebrew": 0x05B8,
+ "qamatsqatanwidehebrew": 0x05B8,
+ "qamatsquarterhebrew": 0x05B8,
+ "qamatswidehebrew": 0x05B8,
+ "afii57806": 0x05B9,
+ "holam": 0x05B9,
+ "holam19": 0x05B9,
+ "holam26": 0x05B9,
+ "holam32": 0x05B9,
+ "holamhebrew": 0x05B9,
+ "holamnarrowhebrew": 0x05B9,
+ "holamquarterhebrew": 0x05B9,
+ "holamwidehebrew": 0x05B9,
+ "afii57796": 0x05BB,
+ "qubuts": 0x05BB,
+ "qubuts18": 0x05BB,
+ "qubuts25": 0x05BB,
+ "qubuts31": 0x05BB,
+ "qubutshebrew": 0x05BB,
+ "qubutsnarrowhebrew": 0x05BB,
+ "qubutsquarterhebrew": 0x05BB,
+ "qubutswidehebrew": 0x05BB,
+ "afii57807": 0x05BC,
+ "dagesh": 0x05BC,
+ "dageshhebrew": 0x05BC,
+ "afii57839": 0x05BD,
+ "siluqhebrew": 0x05BD,
+ "siluqlefthebrew": 0x05BD,
+ "afii57645": 0x05BE,
+ "maqafhebrew": 0x05BE,
+ "afii57841": 0x05BF,
+ "rafe": 0x05BF,
+ "rafehebrew": 0x05BF,
+ "afii57842": 0x05C0,
+ "paseqhebrew": 0x05C0,
+ "afii57804": 0x05C1,
+ "shindothebrew": 0x05C1,
+ "afii57803": 0x05C2,
+ "sindothebrew": 0x05C2,
+ "afii57658": 0x05C3,
+ "sofpasuqhebrew": 0x05C3,
+ "upperdothebrew": 0x05C4,
+ "afii57664": 0x05D0,
+ "alef": 0x05D0,
+ "alefhebrew": 0x05D0,
+ "afii57665": 0x05D1,
+ "bet": 0x05D1,
+ "bethebrew": 0x05D1,
+ "afii57666": 0x05D2,
+ "gimel": 0x05D2,
+ "gimelhebrew": 0x05D2,
+ "afii57667": 0x05D3,
+ "dalet": 0x05D3,
+ "dalethebrew": 0x05D3,
+ "daletsheva": 0x05D3,
+ "daletshevahebrew": 0x05D3,
+ "dalethatafsegol": 0x05D3,
+ "dalethatafsegolhebrew": 0x05D3,
+ "dalethatafpatah": 0x05D3,
+ "dalethatafpatahhebrew": 0x05D3,
+ "dalethiriq": 0x05D3,
+ "dalethiriqhebrew": 0x05D3,
+ "dalettsere": 0x05D3,
+ "dalettserehebrew": 0x05D3,
+ "daletsegol": 0x05D3,
+ "daletsegolhebrew": 0x05D3,
+ "daletpatah": 0x05D3,
+ "daletpatahhebrew": 0x05D3,
+ "daletqamats": 0x05D3,
+ "daletqamatshebrew": 0x05D3,
+ "daletholam": 0x05D3,
+ "daletholamhebrew": 0x05D3,
+ "daletqubuts": 0x05D3,
+ "daletqubutshebrew": 0x05D3,
+ "afii57668": 0x05D4,
+ "he": 0x05D4,
+ "hehebrew": 0x05D4,
+ "afii57669": 0x05D5,
+ "vav": 0x05D5,
+ "vavhebrew": 0x05D5,
+ "afii57670": 0x05D6,
+ "zayin": 0x05D6,
+ "zayinhebrew": 0x05D6,
+ "afii57671": 0x05D7,
+ "het": 0x05D7,
+ "hethebrew": 0x05D7,
+ "afii57672": 0x05D8,
+ "tet": 0x05D8,
+ "tethebrew": 0x05D8,
+ "afii57673": 0x05D9,
+ "yod": 0x05D9,
+ "yodhebrew": 0x05D9,
+ "afii57674": 0x05DA,
+ "finalkaf": 0x05DA,
+ "finalkafhebrew": 0x05DA,
+ "finalkafsheva": 0x05DA,
+ "finalkafshevahebrew": 0x05DA,
+ "finalkafqamats": 0x05DA,
+ "finalkafqamatshebrew": 0x05DA,
+ "afii57675": 0x05DB,
+ "kaf": 0x05DB,
+ "kafhebrew": 0x05DB,
+ "afii57676": 0x05DC,
+ "lamed": 0x05DC,
+ "lamedhebrew": 0x05DC,
+ "lamedholam": 0x05DC,
+ "lamedholamhebrew": 0x05DC,
+ "lamedholamdagesh": 0x05DC,
+ "lamedholamdageshhebrew": 0x05DC,
+ "afii57677": 0x05DD,
+ "finalmem": 0x05DD,
+ "finalmemhebrew": 0x05DD,
+ "afii57678": 0x05DE,
+ "mem": 0x05DE,
+ "memhebrew": 0x05DE,
+ "afii57679": 0x05DF,
+ "finalnun": 0x05DF,
+ "finalnunhebrew": 0x05DF,
+ "afii57680": 0x05E0,
+ "nun": 0x05E0,
+ "nunhebrew": 0x05E0,
+ "afii57681": 0x05E1,
+ "samekh": 0x05E1,
+ "samekhhebrew": 0x05E1,
+ "afii57682": 0x05E2,
+ "ayin": 0x05E2,
+ "ayinhebrew": 0x05E2,
+ "afii57683": 0x05E3,
+ "finalpe": 0x05E3,
+ "finalpehebrew": 0x05E3,
+ "afii57684": 0x05E4,
+ "pe": 0x05E4,
+ "pehebrew": 0x05E4,
+ "afii57685": 0x05E5,
+ "finaltsadi": 0x05E5,
+ "finaltsadihebrew": 0x05E5,
+ "afii57686": 0x05E6,
+ "tsadi": 0x05E6,
+ "tsadihebrew": 0x05E6,
+ "afii57687": 0x05E7,
+ "qof": 0x05E7,
+ "qofhebrew": 0x05E7,
+ "qofsheva": 0x05E7,
+ "qofshevahebrew": 0x05E7,
+ "qofhatafsegol": 0x05E7,
+ "qofhatafsegolhebrew": 0x05E7,
+ "qofhatafpatah": 0x05E7,
+ "qofhatafpatahhebrew": 0x05E7,
+ "qofhiriq": 0x05E7,
+ "qofhiriqhebrew": 0x05E7,
+ "qoftsere": 0x05E7,
+ "qoftserehebrew": 0x05E7,
+ "qofsegol": 0x05E7,
+ "qofsegolhebrew": 0x05E7,
+ "qofpatah": 0x05E7,
+ "qofpatahhebrew": 0x05E7,
+ "qofqamats": 0x05E7,
+ "qofqamatshebrew": 0x05E7,
+ "qofholam": 0x05E7,
+ "qofholamhebrew": 0x05E7,
+ "qofqubuts": 0x05E7,
+ "qofqubutshebrew": 0x05E7,
+ "afii57688": 0x05E8,
+ "resh": 0x05E8,
+ "reshhebrew": 0x05E8,
+ "reshsheva": 0x05E8,
+ "reshshevahebrew": 0x05E8,
+ "reshhatafsegol": 0x05E8,
+ "reshhatafsegolhebrew": 0x05E8,
+ "reshhatafpatah": 0x05E8,
+ "reshhatafpatahhebrew": 0x05E8,
+ "reshhiriq": 0x05E8,
+ "reshhiriqhebrew": 0x05E8,
+ "reshtsere": 0x05E8,
+ "reshtserehebrew": 0x05E8,
+ "reshsegol": 0x05E8,
+ "reshsegolhebrew": 0x05E8,
+ "reshpatah": 0x05E8,
+ "reshpatahhebrew": 0x05E8,
+ "reshqamats": 0x05E8,
+ "reshqamatshebrew": 0x05E8,
+ "reshholam": 0x05E8,
+ "reshholamhebrew": 0x05E8,
+ "reshqubuts": 0x05E8,
+ "reshqubutshebrew": 0x05E8,
+ "afii57689": 0x05E9,
+ "shin": 0x05E9,
+ "shinhebrew": 0x05E9,
+ "afii57690": 0x05EA,
+ "tav": 0x05EA,
+ "tavhebrew": 0x05EA,
+ "afii57716": 0x05F0,
+ "vavvavhebrew": 0x05F0,
+ "afii57717": 0x05F1,
+ "vavyodhebrew": 0x05F1,
+ "afii57718": 0x05F2,
+ "yodyodhebrew": 0x05F2,
+ "gereshhebrew": 0x05F3,
+ "gershayimhebrew": 0x05F4,
+ "afii57388": 0x060C,
+ "commaarabic": 0x060C,
+ "afii57403": 0x061B,
+ "semicolonarabic": 0x061B,
+ "afii57407": 0x061F,
+ "questionarabic": 0x061F,
+ "afii57409": 0x0621,
+ "hamzaarabic": 0x0621,
+ "hamzalowarabic": 0x0621,
+ "hamzafathatanarabic": 0x0621,
+ "hamzadammatanarabic": 0x0621,
+ "hamzalowkasratanarabic": 0x0621,
+ "hamzafathaarabic": 0x0621,
+ "hamzadammaarabic": 0x0621,
+ "hamzalowkasraarabic": 0x0621,
+ "hamzasukunarabic": 0x0621,
+ "afii57410": 0x0622,
+ "alefmaddaabovearabic": 0x0622,
+ "afii57411": 0x0623,
+ "alefhamzaabovearabic": 0x0623,
+ "afii57412": 0x0624,
+ "wawhamzaabovearabic": 0x0624,
+ "afii57413": 0x0625,
+ "alefhamzabelowarabic": 0x0625,
+ "afii57414": 0x0626,
+ "yehhamzaabovearabic": 0x0626,
+ "afii57415": 0x0627,
+ "alefarabic": 0x0627,
+ "afii57416": 0x0628,
+ "beharabic": 0x0628,
+ "afii57417": 0x0629,
+ "tehmarbutaarabic": 0x0629,
+ "afii57418": 0x062A,
+ "teharabic": 0x062A,
+ "afii57419": 0x062B,
+ "theharabic": 0x062B,
+ "afii57420": 0x062C,
+ "jeemarabic": 0x062C,
+ "afii57421": 0x062D,
+ "haharabic": 0x062D,
+ "afii57422": 0x062E,
+ "khaharabic": 0x062E,
+ "afii57423": 0x062F,
+ "dalarabic": 0x062F,
+ "afii57424": 0x0630,
+ "thalarabic": 0x0630,
+ "afii57425": 0x0631,
+ "reharabic": 0x0631,
+ "rehyehaleflamarabic": 0x0631,
+ "afii57426": 0x0632,
+ "zainarabic": 0x0632,
+ "afii57427": 0x0633,
+ "seenarabic": 0x0633,
+ "afii57428": 0x0634,
+ "sheenarabic": 0x0634,
+ "afii57429": 0x0635,
+ "sadarabic": 0x0635,
+ "afii57430": 0x0636,
+ "dadarabic": 0x0636,
+ "afii57431": 0x0637,
+ "taharabic": 0x0637,
+ "afii57432": 0x0638,
+ "zaharabic": 0x0638,
+ "afii57433": 0x0639,
+ "ainarabic": 0x0639,
+ "afii57434": 0x063A,
+ "ghainarabic": 0x063A,
+ "afii57440": 0x0640,
+ "kashidaautoarabic": 0x0640,
+ "kashidaautonosidebearingarabic": 0x0640,
+ "tatweelarabic": 0x0640,
+ "afii57441": 0x0641,
+ "feharabic": 0x0641,
+ "afii57442": 0x0642,
+ "qafarabic": 0x0642,
+ "afii57443": 0x0643,
+ "kafarabic": 0x0643,
+ "afii57444": 0x0644,
+ "lamarabic": 0x0644,
+ "afii57445": 0x0645,
+ "meemarabic": 0x0645,
+ "afii57446": 0x0646,
+ "noonarabic": 0x0646,
+ "afii57470": 0x0647,
+ "heharabic": 0x0647,
+ "afii57448": 0x0648,
+ "wawarabic": 0x0648,
+ "afii57449": 0x0649,
+ "alefmaksuraarabic": 0x0649,
+ "afii57450": 0x064A,
+ "yeharabic": 0x064A,
+ "afii57451": 0x064B,
+ "fathatanarabic": 0x064B,
+ "afii57452": 0x064C,
+ "dammatanaltonearabic": 0x064C,
+ "dammatanarabic": 0x064C,
+ "afii57453": 0x064D,
+ "kasratanarabic": 0x064D,
+ "afii57454": 0x064E,
+ "fathaarabic": 0x064E,
+ "fathalowarabic": 0x064E,
+ "afii57455": 0x064F,
+ "dammaarabic": 0x064F,
+ "dammalowarabic": 0x064F,
+ "afii57456": 0x0650,
+ "kasraarabic": 0x0650,
+ "afii57457": 0x0651,
+ "shaddaarabic": 0x0651,
+ "shaddafathatanarabic": 0x0651,
+ "afii57458": 0x0652,
+ "sukunarabic": 0x0652,
+ "afii57392": 0x0660,
+ "zeroarabic": 0x0660,
+ "zerohackarabic": 0x0660,
+ "afii57393": 0x0661,
+ "onearabic": 0x0661,
+ "onehackarabic": 0x0661,
+ "afii57394": 0x0662,
+ "twoarabic": 0x0662,
+ "twohackarabic": 0x0662,
+ "afii57395": 0x0663,
+ "threearabic": 0x0663,
+ "threehackarabic": 0x0663,
+ "afii57396": 0x0664,
+ "fourarabic": 0x0664,
+ "fourhackarabic": 0x0664,
+ "afii57397": 0x0665,
+ "fivearabic": 0x0665,
+ "fivehackarabic": 0x0665,
+ "afii57398": 0x0666,
+ "sixarabic": 0x0666,
+ "sixhackarabic": 0x0666,
+ "afii57399": 0x0667,
+ "sevenarabic": 0x0667,
+ "sevenhackarabic": 0x0667,
+ "afii57400": 0x0668,
+ "eightarabic": 0x0668,
+ "eighthackarabic": 0x0668,
+ "afii57401": 0x0669,
+ "ninearabic": 0x0669,
+ "ninehackarabic": 0x0669,
+ "afii57381": 0x066A,
+ "percentarabic": 0x066A,
+ "decimalseparatorarabic": 0x066B,
+ "decimalseparatorpersian": 0x066B,
+ "thousandsseparatorarabic": 0x066C,
+ "thousandsseparatorpersian": 0x066C,
+ "afii63167": 0x066D,
+ "asteriskaltonearabic": 0x066D,
+ "asteriskarabic": 0x066D,
+ "afii57511": 0x0679,
+ "tteharabic": 0x0679,
+ "afii57506": 0x067E,
+ "peharabic": 0x067E,
+ "afii57507": 0x0686,
+ "tcheharabic": 0x0686,
+ "afii57512": 0x0688,
+ "ddalarabic": 0x0688,
+ "afii57513": 0x0691,
+ "rreharabic": 0x0691,
+ "afii57508": 0x0698,
+ "jeharabic": 0x0698,
+ "afii57505": 0x06A4,
+ "veharabic": 0x06A4,
+ "afii57509": 0x06AF,
+ "gafarabic": 0x06AF,
+ "afii57514": 0x06BA,
+ "noonghunnaarabic": 0x06BA,
+ "haaltonearabic": 0x06C1,
+ "hehaltonearabic": 0x06C1,
+ "yehthreedotsbelowarabic": 0x06D1,
+ "afii57519": 0x06D2,
+ "yehbarreearabic": 0x06D2,
+ "afii57534": 0x06D5,
+ "zeropersian": 0x06F0,
+ "onepersian": 0x06F1,
+ "twopersian": 0x06F2,
+ "threepersian": 0x06F3,
+ "fourpersian": 0x06F4,
+ "fivepersian": 0x06F5,
+ "sixpersian": 0x06F6,
+ "sevenpersian": 0x06F7,
+ "eightpersian": 0x06F8,
+ "ninepersian": 0x06F9,
+ "candrabindudeva": 0x0901,
+ "anusvaradeva": 0x0902,
+ "visargadeva": 0x0903,
+ "adeva": 0x0905,
+ "aadeva": 0x0906,
+ "ideva": 0x0907,
+ "iideva": 0x0908,
+ "udeva": 0x0909,
+ "uudeva": 0x090A,
+ "rvocalicdeva": 0x090B,
+ "lvocalicdeva": 0x090C,
+ "ecandradeva": 0x090D,
+ "eshortdeva": 0x090E,
+ "edeva": 0x090F,
+ "aideva": 0x0910,
+ "ocandradeva": 0x0911,
+ "oshortdeva": 0x0912,
+ "odeva": 0x0913,
+ "audeva": 0x0914,
+ "kadeva": 0x0915,
+ "khadeva": 0x0916,
+ "gadeva": 0x0917,
+ "ghadeva": 0x0918,
+ "ngadeva": 0x0919,
+ "cadeva": 0x091A,
+ "chadeva": 0x091B,
+ "jadeva": 0x091C,
+ "jhadeva": 0x091D,
+ "nyadeva": 0x091E,
+ "ttadeva": 0x091F,
+ "tthadeva": 0x0920,
+ "ddadeva": 0x0921,
+ "ddhadeva": 0x0922,
+ "nnadeva": 0x0923,
+ "tadeva": 0x0924,
+ "thadeva": 0x0925,
+ "dadeva": 0x0926,
+ "dhadeva": 0x0927,
+ "nadeva": 0x0928,
+ "nnnadeva": 0x0929,
+ "padeva": 0x092A,
+ "phadeva": 0x092B,
+ "badeva": 0x092C,
+ "bhadeva": 0x092D,
+ "madeva": 0x092E,
+ "yadeva": 0x092F,
+ "radeva": 0x0930,
+ "rradeva": 0x0931,
+ "ladeva": 0x0932,
+ "lladeva": 0x0933,
+ "llladeva": 0x0934,
+ "vadeva": 0x0935,
+ "shadeva": 0x0936,
+ "ssadeva": 0x0937,
+ "sadeva": 0x0938,
+ "hadeva": 0x0939,
+ "nuktadeva": 0x093C,
+ "avagrahadeva": 0x093D,
+ "aavowelsigndeva": 0x093E,
+ "ivowelsigndeva": 0x093F,
+ "iivowelsigndeva": 0x0940,
+ "uvowelsigndeva": 0x0941,
+ "uuvowelsigndeva": 0x0942,
+ "rvocalicvowelsigndeva": 0x0943,
+ "rrvocalicvowelsigndeva": 0x0944,
+ "ecandravowelsigndeva": 0x0945,
+ "eshortvowelsigndeva": 0x0946,
+ "evowelsigndeva": 0x0947,
+ "aivowelsigndeva": 0x0948,
+ "ocandravowelsigndeva": 0x0949,
+ "oshortvowelsigndeva": 0x094A,
+ "ovowelsigndeva": 0x094B,
+ "auvowelsigndeva": 0x094C,
+ "viramadeva": 0x094D,
+ "omdeva": 0x0950,
+ "udattadeva": 0x0951,
+ "anudattadeva": 0x0952,
+ "gravedeva": 0x0953,
+ "acutedeva": 0x0954,
+ "qadeva": 0x0958,
+ "khhadeva": 0x0959,
+ "ghhadeva": 0x095A,
+ "zadeva": 0x095B,
+ "dddhadeva": 0x095C,
+ "rhadeva": 0x095D,
+ "fadeva": 0x095E,
+ "yyadeva": 0x095F,
+ "rrvocalicdeva": 0x0960,
+ "llvocalicdeva": 0x0961,
+ "lvocalicvowelsigndeva": 0x0962,
+ "llvocalicvowelsigndeva": 0x0963,
+ "danda": 0x0964,
+ "dbldanda": 0x0965,
+ "zerodeva": 0x0966,
+ "onedeva": 0x0967,
+ "twodeva": 0x0968,
+ "threedeva": 0x0969,
+ "fourdeva": 0x096A,
+ "fivedeva": 0x096B,
+ "sixdeva": 0x096C,
+ "sevendeva": 0x096D,
+ "eightdeva": 0x096E,
+ "ninedeva": 0x096F,
+ "abbreviationsigndeva": 0x0970,
+ "candrabindubengali": 0x0981,
+ "anusvarabengali": 0x0982,
+ "visargabengali": 0x0983,
+ "abengali": 0x0985,
+ "aabengali": 0x0986,
+ "ibengali": 0x0987,
+ "iibengali": 0x0988,
+ "ubengali": 0x0989,
+ "uubengali": 0x098A,
+ "rvocalicbengali": 0x098B,
+ "lvocalicbengali": 0x098C,
+ "ebengali": 0x098F,
+ "aibengali": 0x0990,
+ "obengali": 0x0993,
+ "aubengali": 0x0994,
+ "kabengali": 0x0995,
+ "khabengali": 0x0996,
+ "gabengali": 0x0997,
+ "ghabengali": 0x0998,
+ "ngabengali": 0x0999,
+ "cabengali": 0x099A,
+ "chabengali": 0x099B,
+ "jabengali": 0x099C,
+ "jhabengali": 0x099D,
+ "nyabengali": 0x099E,
+ "ttabengali": 0x099F,
+ "tthabengali": 0x09A0,
+ "ddabengali": 0x09A1,
+ "ddhabengali": 0x09A2,
+ "nnabengali": 0x09A3,
+ "tabengali": 0x09A4,
+ "thabengali": 0x09A5,
+ "dabengali": 0x09A6,
+ "dhabengali": 0x09A7,
+ "nabengali": 0x09A8,
+ "pabengali": 0x09AA,
+ "phabengali": 0x09AB,
+ "babengali": 0x09AC,
+ "bhabengali": 0x09AD,
+ "mabengali": 0x09AE,
+ "yabengali": 0x09AF,
+ "rabengali": 0x09B0,
+ "labengali": 0x09B2,
+ "shabengali": 0x09B6,
+ "ssabengali": 0x09B7,
+ "sabengali": 0x09B8,
+ "habengali": 0x09B9,
+ "nuktabengali": 0x09BC,
+ "aavowelsignbengali": 0x09BE,
+ "ivowelsignbengali": 0x09BF,
+ "iivowelsignbengali": 0x09C0,
+ "uvowelsignbengali": 0x09C1,
+ "uuvowelsignbengali": 0x09C2,
+ "rvocalicvowelsignbengali": 0x09C3,
+ "rrvocalicvowelsignbengali": 0x09C4,
+ "evowelsignbengali": 0x09C7,
+ "aivowelsignbengali": 0x09C8,
+ "ovowelsignbengali": 0x09CB,
+ "auvowelsignbengali": 0x09CC,
+ "viramabengali": 0x09CD,
+ "aulengthmarkbengali": 0x09D7,
+ "rrabengali": 0x09DC,
+ "rhabengali": 0x09DD,
+ "yyabengali": 0x09DF,
+ "rrvocalicbengali": 0x09E0,
+ "llvocalicbengali": 0x09E1,
+ "lvocalicvowelsignbengali": 0x09E2,
+ "llvocalicvowelsignbengali": 0x09E3,
+ "zerobengali": 0x09E6,
+ "onebengali": 0x09E7,
+ "twobengali": 0x09E8,
+ "threebengali": 0x09E9,
+ "fourbengali": 0x09EA,
+ "fivebengali": 0x09EB,
+ "sixbengali": 0x09EC,
+ "sevenbengali": 0x09ED,
+ "eightbengali": 0x09EE,
+ "ninebengali": 0x09EF,
+ "ramiddlediagonalbengali": 0x09F0,
+ "ralowerdiagonalbengali": 0x09F1,
+ "rupeemarkbengali": 0x09F2,
+ "rupeesignbengali": 0x09F3,
+ "onenumeratorbengali": 0x09F4,
+ "twonumeratorbengali": 0x09F5,
+ "threenumeratorbengali": 0x09F6,
+ "fournumeratorbengali": 0x09F7,
+ "denominatorminusonenumeratorbengali": 0x09F8,
+ "sixteencurrencydenominatorbengali": 0x09F9,
+ "issharbengali": 0x09FA,
+ "bindigurmukhi": 0x0A02,
+ "agurmukhi": 0x0A05,
+ "aagurmukhi": 0x0A06,
+ "igurmukhi": 0x0A07,
+ "iigurmukhi": 0x0A08,
+ "ugurmukhi": 0x0A09,
+ "uugurmukhi": 0x0A0A,
+ "eegurmukhi": 0x0A0F,
+ "aigurmukhi": 0x0A10,
+ "oogurmukhi": 0x0A13,
+ "augurmukhi": 0x0A14,
+ "kagurmukhi": 0x0A15,
+ "khagurmukhi": 0x0A16,
+ "gagurmukhi": 0x0A17,
+ "ghagurmukhi": 0x0A18,
+ "ngagurmukhi": 0x0A19,
+ "cagurmukhi": 0x0A1A,
+ "chagurmukhi": 0x0A1B,
+ "jagurmukhi": 0x0A1C,
+ "jhagurmukhi": 0x0A1D,
+ "nyagurmukhi": 0x0A1E,
+ "ttagurmukhi": 0x0A1F,
+ "tthagurmukhi": 0x0A20,
+ "ddagurmukhi": 0x0A21,
+ "ddhagurmukhi": 0x0A22,
+ "nnagurmukhi": 0x0A23,
+ "tagurmukhi": 0x0A24,
+ "thagurmukhi": 0x0A25,
+ "dagurmukhi": 0x0A26,
+ "dhagurmukhi": 0x0A27,
+ "nagurmukhi": 0x0A28,
+ "pagurmukhi": 0x0A2A,
+ "phagurmukhi": 0x0A2B,
+ "bagurmukhi": 0x0A2C,
+ "bhagurmukhi": 0x0A2D,
+ "magurmukhi": 0x0A2E,
+ "yagurmukhi": 0x0A2F,
+ "ragurmukhi": 0x0A30,
+ "lagurmukhi": 0x0A32,
+ "vagurmukhi": 0x0A35,
+ "shagurmukhi": 0x0A36,
+ "sagurmukhi": 0x0A38,
+ "hagurmukhi": 0x0A39,
+ "nuktagurmukhi": 0x0A3C,
+ "aamatragurmukhi": 0x0A3E,
+ "imatragurmukhi": 0x0A3F,
+ "iimatragurmukhi": 0x0A40,
+ "umatragurmukhi": 0x0A41,
+ "uumatragurmukhi": 0x0A42,
+ "eematragurmukhi": 0x0A47,
+ "aimatragurmukhi": 0x0A48,
+ "oomatragurmukhi": 0x0A4B,
+ "aumatragurmukhi": 0x0A4C,
+ "halantgurmukhi": 0x0A4D,
+ "khhagurmukhi": 0x0A59,
+ "ghhagurmukhi": 0x0A5A,
+ "zagurmukhi": 0x0A5B,
+ "rragurmukhi": 0x0A5C,
+ "fagurmukhi": 0x0A5E,
+ "zerogurmukhi": 0x0A66,
+ "onegurmukhi": 0x0A67,
+ "twogurmukhi": 0x0A68,
+ "threegurmukhi": 0x0A69,
+ "fourgurmukhi": 0x0A6A,
+ "fivegurmukhi": 0x0A6B,
+ "sixgurmukhi": 0x0A6C,
+ "sevengurmukhi": 0x0A6D,
+ "eightgurmukhi": 0x0A6E,
+ "ninegurmukhi": 0x0A6F,
+ "tippigurmukhi": 0x0A70,
+ "addakgurmukhi": 0x0A71,
+ "irigurmukhi": 0x0A72,
+ "uragurmukhi": 0x0A73,
+ "ekonkargurmukhi": 0x0A74,
+ "candrabindugujarati": 0x0A81,
+ "anusvaragujarati": 0x0A82,
+ "visargagujarati": 0x0A83,
+ "agujarati": 0x0A85,
+ "aagujarati": 0x0A86,
+ "igujarati": 0x0A87,
+ "iigujarati": 0x0A88,
+ "ugujarati": 0x0A89,
+ "uugujarati": 0x0A8A,
+ "rvocalicgujarati": 0x0A8B,
+ "ecandragujarati": 0x0A8D,
+ "egujarati": 0x0A8F,
+ "aigujarati": 0x0A90,
+ "ocandragujarati": 0x0A91,
+ "ogujarati": 0x0A93,
+ "augujarati": 0x0A94,
+ "kagujarati": 0x0A95,
+ "khagujarati": 0x0A96,
+ "gagujarati": 0x0A97,
+ "ghagujarati": 0x0A98,
+ "ngagujarati": 0x0A99,
+ "cagujarati": 0x0A9A,
+ "chagujarati": 0x0A9B,
+ "jagujarati": 0x0A9C,
+ "jhagujarati": 0x0A9D,
+ "nyagujarati": 0x0A9E,
+ "ttagujarati": 0x0A9F,
+ "tthagujarati": 0x0AA0,
+ "ddagujarati": 0x0AA1,
+ "ddhagujarati": 0x0AA2,
+ "nnagujarati": 0x0AA3,
+ "tagujarati": 0x0AA4,
+ "thagujarati": 0x0AA5,
+ "dagujarati": 0x0AA6,
+ "dhagujarati": 0x0AA7,
+ "nagujarati": 0x0AA8,
+ "pagujarati": 0x0AAA,
+ "phagujarati": 0x0AAB,
+ "bagujarati": 0x0AAC,
+ "bhagujarati": 0x0AAD,
+ "magujarati": 0x0AAE,
+ "yagujarati": 0x0AAF,
+ "ragujarati": 0x0AB0,
+ "lagujarati": 0x0AB2,
+ "llagujarati": 0x0AB3,
+ "vagujarati": 0x0AB5,
+ "shagujarati": 0x0AB6,
+ "ssagujarati": 0x0AB7,
+ "sagujarati": 0x0AB8,
+ "hagujarati": 0x0AB9,
+ "nuktagujarati": 0x0ABC,
+ "aavowelsigngujarati": 0x0ABE,
+ "ivowelsigngujarati": 0x0ABF,
+ "iivowelsigngujarati": 0x0AC0,
+ "uvowelsigngujarati": 0x0AC1,
+ "uuvowelsigngujarati": 0x0AC2,
+ "rvocalicvowelsigngujarati": 0x0AC3,
+ "rrvocalicvowelsigngujarati": 0x0AC4,
+ "ecandravowelsigngujarati": 0x0AC5,
+ "evowelsigngujarati": 0x0AC7,
+ "aivowelsigngujarati": 0x0AC8,
+ "ocandravowelsigngujarati": 0x0AC9,
+ "ovowelsigngujarati": 0x0ACB,
+ "auvowelsigngujarati": 0x0ACC,
+ "viramagujarati": 0x0ACD,
+ "omgujarati": 0x0AD0,
+ "rrvocalicgujarati": 0x0AE0,
+ "zerogujarati": 0x0AE6,
+ "onegujarati": 0x0AE7,
+ "twogujarati": 0x0AE8,
+ "threegujarati": 0x0AE9,
+ "fourgujarati": 0x0AEA,
+ "fivegujarati": 0x0AEB,
+ "sixgujarati": 0x0AEC,
+ "sevengujarati": 0x0AED,
+ "eightgujarati": 0x0AEE,
+ "ninegujarati": 0x0AEF,
+ "kokaithai": 0x0E01,
+ "khokhaithai": 0x0E02,
+ "khokhuatthai": 0x0E03,
+ "khokhwaithai": 0x0E04,
+ "khokhonthai": 0x0E05,
+ "khorakhangthai": 0x0E06,
+ "ngonguthai": 0x0E07,
+ "chochanthai": 0x0E08,
+ "chochingthai": 0x0E09,
+ "chochangthai": 0x0E0A,
+ "sosothai": 0x0E0B,
+ "chochoethai": 0x0E0C,
+ "yoyingthai": 0x0E0D,
+ "dochadathai": 0x0E0E,
+ "topatakthai": 0x0E0F,
+ "thothanthai": 0x0E10,
+ "thonangmonthothai": 0x0E11,
+ "thophuthaothai": 0x0E12,
+ "nonenthai": 0x0E13,
+ "dodekthai": 0x0E14,
+ "totaothai": 0x0E15,
+ "thothungthai": 0x0E16,
+ "thothahanthai": 0x0E17,
+ "thothongthai": 0x0E18,
+ "nonuthai": 0x0E19,
+ "bobaimaithai": 0x0E1A,
+ "poplathai": 0x0E1B,
+ "phophungthai": 0x0E1C,
+ "fofathai": 0x0E1D,
+ "phophanthai": 0x0E1E,
+ "fofanthai": 0x0E1F,
+ "phosamphaothai": 0x0E20,
+ "momathai": 0x0E21,
+ "yoyakthai": 0x0E22,
+ "roruathai": 0x0E23,
+ "ruthai": 0x0E24,
+ "lolingthai": 0x0E25,
+ "luthai": 0x0E26,
+ "wowaenthai": 0x0E27,
+ "sosalathai": 0x0E28,
+ "sorusithai": 0x0E29,
+ "sosuathai": 0x0E2A,
+ "hohipthai": 0x0E2B,
+ "lochulathai": 0x0E2C,
+ "oangthai": 0x0E2D,
+ "honokhukthai": 0x0E2E,
+ "paiyannoithai": 0x0E2F,
+ "saraathai": 0x0E30,
+ "maihanakatthai": 0x0E31,
+ "saraaathai": 0x0E32,
+ "saraamthai": 0x0E33,
+ "saraithai": 0x0E34,
+ "saraiithai": 0x0E35,
+ "sarauethai": 0x0E36,
+ "saraueethai": 0x0E37,
+ "sarauthai": 0x0E38,
+ "sarauuthai": 0x0E39,
+ "phinthuthai": 0x0E3A,
+ "bahtthai": 0x0E3F,
+ "saraethai": 0x0E40,
+ "saraaethai": 0x0E41,
+ "saraothai": 0x0E42,
+ "saraaimaimuanthai": 0x0E43,
+ "saraaimaimalaithai": 0x0E44,
+ "lakkhangyaothai": 0x0E45,
+ "maiyamokthai": 0x0E46,
+ "maitaikhuthai": 0x0E47,
+ "maiekthai": 0x0E48,
+ "maithothai": 0x0E49,
+ "maitrithai": 0x0E4A,
+ "maichattawathai": 0x0E4B,
+ "thanthakhatthai": 0x0E4C,
+ "nikhahitthai": 0x0E4D,
+ "yamakkanthai": 0x0E4E,
+ "fongmanthai": 0x0E4F,
+ "zerothai": 0x0E50,
+ "onethai": 0x0E51,
+ "twothai": 0x0E52,
+ "threethai": 0x0E53,
+ "fourthai": 0x0E54,
+ "fivethai": 0x0E55,
+ "sixthai": 0x0E56,
+ "seventhai": 0x0E57,
+ "eightthai": 0x0E58,
+ "ninethai": 0x0E59,
+ "angkhankhuthai": 0x0E5A,
+ "khomutthai": 0x0E5B,
+ "Aringbelow": 0x1E00,
+ "aringbelow": 0x1E01,
+ "Bdotaccent": 0x1E02,
+ "bdotaccent": 0x1E03,
+ "Bdotbelow": 0x1E04,
+ "bdotbelow": 0x1E05,
+ "Blinebelow": 0x1E06,
+ "blinebelow": 0x1E07,
+ "Ccedillaacute": 0x1E08,
+ "ccedillaacute": 0x1E09,
+ "Ddotaccent": 0x1E0A,
+ "ddotaccent": 0x1E0B,
+ "Ddotbelow": 0x1E0C,
+ "ddotbelow": 0x1E0D,
+ "Dlinebelow": 0x1E0E,
+ "dlinebelow": 0x1E0F,
+ "Dcedilla": 0x1E10,
+ "dcedilla": 0x1E11,
+ "Dcircumflexbelow": 0x1E12,
+ "dcircumflexbelow": 0x1E13,
+ "Emacrongrave": 0x1E14,
+ "emacrongrave": 0x1E15,
+ "Emacronacute": 0x1E16,
+ "emacronacute": 0x1E17,
+ "Ecircumflexbelow": 0x1E18,
+ "ecircumflexbelow": 0x1E19,
+ "Etildebelow": 0x1E1A,
+ "etildebelow": 0x1E1B,
+ "Ecedillabreve": 0x1E1C,
+ "ecedillabreve": 0x1E1D,
+ "Fdotaccent": 0x1E1E,
+ "fdotaccent": 0x1E1F,
+ "Gmacron": 0x1E20,
+ "gmacron": 0x1E21,
+ "Hdotaccent": 0x1E22,
+ "hdotaccent": 0x1E23,
+ "Hdotbelow": 0x1E24,
+ "hdotbelow": 0x1E25,
+ "Hdieresis": 0x1E26,
+ "hdieresis": 0x1E27,
+ "Hcedilla": 0x1E28,
+ "hcedilla": 0x1E29,
+ "Hbrevebelow": 0x1E2A,
+ "hbrevebelow": 0x1E2B,
+ "Itildebelow": 0x1E2C,
+ "itildebelow": 0x1E2D,
+ "Idieresisacute": 0x1E2E,
+ "idieresisacute": 0x1E2F,
+ "Kacute": 0x1E30,
+ "kacute": 0x1E31,
+ "Kdotbelow": 0x1E32,
+ "kdotbelow": 0x1E33,
+ "Klinebelow": 0x1E34,
+ "klinebelow": 0x1E35,
+ "Ldotbelow": 0x1E36,
+ "ldotbelow": 0x1E37,
+ "Ldotbelowmacron": 0x1E38,
+ "ldotbelowmacron": 0x1E39,
+ "Llinebelow": 0x1E3A,
+ "llinebelow": 0x1E3B,
+ "Lcircumflexbelow": 0x1E3C,
+ "lcircumflexbelow": 0x1E3D,
+ "Macute": 0x1E3E,
+ "macute": 0x1E3F,
+ "Mdotaccent": 0x1E40,
+ "mdotaccent": 0x1E41,
+ "Mdotbelow": 0x1E42,
+ "mdotbelow": 0x1E43,
+ "Ndotaccent": 0x1E44,
+ "ndotaccent": 0x1E45,
+ "Ndotbelow": 0x1E46,
+ "ndotbelow": 0x1E47,
+ "Nlinebelow": 0x1E48,
+ "nlinebelow": 0x1E49,
+ "Ncircumflexbelow": 0x1E4A,
+ "ncircumflexbelow": 0x1E4B,
+ "Otildeacute": 0x1E4C,
+ "otildeacute": 0x1E4D,
+ "Otildedieresis": 0x1E4E,
+ "otildedieresis": 0x1E4F,
+ "Omacrongrave": 0x1E50,
+ "omacrongrave": 0x1E51,
+ "Omacronacute": 0x1E52,
+ "omacronacute": 0x1E53,
+ "Pacute": 0x1E54,
+ "pacute": 0x1E55,
+ "Pdotaccent": 0x1E56,
+ "pdotaccent": 0x1E57,
+ "Rdotaccent": 0x1E58,
+ "rdotaccent": 0x1E59,
+ "Rdotbelow": 0x1E5A,
+ "rdotbelow": 0x1E5B,
+ "Rdotbelowmacron": 0x1E5C,
+ "rdotbelowmacron": 0x1E5D,
+ "Rlinebelow": 0x1E5E,
+ "rlinebelow": 0x1E5F,
+ "Sdotaccent": 0x1E60,
+ "sdotaccent": 0x1E61,
+ "Sdotbelow": 0x1E62,
+ "sdotbelow": 0x1E63,
+ "Sacutedotaccent": 0x1E64,
+ "sacutedotaccent": 0x1E65,
+ "Scarondotaccent": 0x1E66,
+ "scarondotaccent": 0x1E67,
+ "Sdotbelowdotaccent": 0x1E68,
+ "sdotbelowdotaccent": 0x1E69,
+ "Tdotaccent": 0x1E6A,
+ "tdotaccent": 0x1E6B,
+ "Tdotbelow": 0x1E6C,
+ "tdotbelow": 0x1E6D,
+ "Tlinebelow": 0x1E6E,
+ "tlinebelow": 0x1E6F,
+ "Tcircumflexbelow": 0x1E70,
+ "tcircumflexbelow": 0x1E71,
+ "Udieresisbelow": 0x1E72,
+ "udieresisbelow": 0x1E73,
+ "Utildebelow": 0x1E74,
+ "utildebelow": 0x1E75,
+ "Ucircumflexbelow": 0x1E76,
+ "ucircumflexbelow": 0x1E77,
+ "Utildeacute": 0x1E78,
+ "utildeacute": 0x1E79,
+ "Umacrondieresis": 0x1E7A,
+ "umacrondieresis": 0x1E7B,
+ "Vtilde": 0x1E7C,
+ "vtilde": 0x1E7D,
+ "Vdotbelow": 0x1E7E,
+ "vdotbelow": 0x1E7F,
+ "Wgrave": 0x1E80,
+ "wgrave": 0x1E81,
+ "Wacute": 0x1E82,
+ "wacute": 0x1E83,
+ "Wdieresis": 0x1E84,
+ "wdieresis": 0x1E85,
+ "Wdotaccent": 0x1E86,
+ "wdotaccent": 0x1E87,
+ "Wdotbelow": 0x1E88,
+ "wdotbelow": 0x1E89,
+ "Xdotaccent": 0x1E8A,
+ "xdotaccent": 0x1E8B,
+ "Xdieresis": 0x1E8C,
+ "xdieresis": 0x1E8D,
+ "Ydotaccent": 0x1E8E,
+ "ydotaccent": 0x1E8F,
+ "Zcircumflex": 0x1E90,
+ "zcircumflex": 0x1E91,
+ "Zdotbelow": 0x1E92,
+ "zdotbelow": 0x1E93,
+ "Zlinebelow": 0x1E94,
+ "zlinebelow": 0x1E95,
+ "hlinebelow": 0x1E96,
+ "tdieresis": 0x1E97,
+ "wring": 0x1E98,
+ "yring": 0x1E99,
+ "arighthalfring": 0x1E9A,
+ "slongdotaccent": 0x1E9B,
+ "Adotbelow": 0x1EA0,
+ "adotbelow": 0x1EA1,
+ "Ahookabove": 0x1EA2,
+ "ahookabove": 0x1EA3,
+ "Acircumflexacute": 0x1EA4,
+ "acircumflexacute": 0x1EA5,
+ "Acircumflexgrave": 0x1EA6,
+ "acircumflexgrave": 0x1EA7,
+ "Acircumflexhookabove": 0x1EA8,
+ "acircumflexhookabove": 0x1EA9,
+ "Acircumflextilde": 0x1EAA,
+ "acircumflextilde": 0x1EAB,
+ "Acircumflexdotbelow": 0x1EAC,
+ "acircumflexdotbelow": 0x1EAD,
+ "Abreveacute": 0x1EAE,
+ "abreveacute": 0x1EAF,
+ "Abrevegrave": 0x1EB0,
+ "abrevegrave": 0x1EB1,
+ "Abrevehookabove": 0x1EB2,
+ "abrevehookabove": 0x1EB3,
+ "Abrevetilde": 0x1EB4,
+ "abrevetilde": 0x1EB5,
+ "Abrevedotbelow": 0x1EB6,
+ "abrevedotbelow": 0x1EB7,
+ "Edotbelow": 0x1EB8,
+ "edotbelow": 0x1EB9,
+ "Ehookabove": 0x1EBA,
+ "ehookabove": 0x1EBB,
+ "Etilde": 0x1EBC,
+ "etilde": 0x1EBD,
+ "Ecircumflexacute": 0x1EBE,
+ "ecircumflexacute": 0x1EBF,
+ "Ecircumflexgrave": 0x1EC0,
+ "ecircumflexgrave": 0x1EC1,
+ "Ecircumflexhookabove": 0x1EC2,
+ "ecircumflexhookabove": 0x1EC3,
+ "Ecircumflextilde": 0x1EC4,
+ "ecircumflextilde": 0x1EC5,
+ "Ecircumflexdotbelow": 0x1EC6,
+ "ecircumflexdotbelow": 0x1EC7,
+ "Ihookabove": 0x1EC8,
+ "ihookabove": 0x1EC9,
+ "Idotbelow": 0x1ECA,
+ "idotbelow": 0x1ECB,
+ "Odotbelow": 0x1ECC,
+ "odotbelow": 0x1ECD,
+ "Ohookabove": 0x1ECE,
+ "ohookabove": 0x1ECF,
+ "Ocircumflexacute": 0x1ED0,
+ "ocircumflexacute": 0x1ED1,
+ "Ocircumflexgrave": 0x1ED2,
+ "ocircumflexgrave": 0x1ED3,
+ "Ocircumflexhookabove": 0x1ED4,
+ "ocircumflexhookabove": 0x1ED5,
+ "Ocircumflextilde": 0x1ED6,
+ "ocircumflextilde": 0x1ED7,
+ "Ocircumflexdotbelow": 0x1ED8,
+ "ocircumflexdotbelow": 0x1ED9,
+ "Ohornacute": 0x1EDA,
+ "ohornacute": 0x1EDB,
+ "Ohorngrave": 0x1EDC,
+ "ohorngrave": 0x1EDD,
+ "Ohornhookabove": 0x1EDE,
+ "ohornhookabove": 0x1EDF,
+ "Ohorntilde": 0x1EE0,
+ "ohorntilde": 0x1EE1,
+ "Ohorndotbelow": 0x1EE2,
+ "ohorndotbelow": 0x1EE3,
+ "Udotbelow": 0x1EE4,
+ "udotbelow": 0x1EE5,
+ "Uhookabove": 0x1EE6,
+ "uhookabove": 0x1EE7,
+ "Uhornacute": 0x1EE8,
+ "uhornacute": 0x1EE9,
+ "Uhorngrave": 0x1EEA,
+ "uhorngrave": 0x1EEB,
+ "Uhornhookabove": 0x1EEC,
+ "uhornhookabove": 0x1EED,
+ "Uhorntilde": 0x1EEE,
+ "uhorntilde": 0x1EEF,
+ "Uhorndotbelow": 0x1EF0,
+ "uhorndotbelow": 0x1EF1,
+ "Ygrave": 0x1EF2,
+ "ygrave": 0x1EF3,
+ "Ydotbelow": 0x1EF4,
+ "ydotbelow": 0x1EF5,
+ "Yhookabove": 0x1EF6,
+ "yhookabove": 0x1EF7,
+ "Ytilde": 0x1EF8,
+ "ytilde": 0x1EF9,
+ "zerowidthspace": 0x200B,
+ "hyphentwo": 0x2010,
+ "figuredash": 0x2012,
+ "afii00208": 0x2015,
+ "horizontalbar": 0x2015,
+ "dblverticalbar": 0x2016,
+ "dbllowline": 0x2017,
+ "underscoredbl": 0x2017,
+ "quoteleftreversed": 0x201B,
+ "quotereversed": 0x201B,
+ "onedotenleader": 0x2024,
+ "twodotenleader": 0x2025,
+ "twodotleader": 0x2025,
+ "afii61573": 0x202C,
+ "afii61574": 0x202D,
+ "afii61575": 0x202E,
+ "primereversed": 0x2035,
+ "referencemark": 0x203B,
+ "exclamdbl": 0x203C,
+ "asterism": 0x2042,
+ "zerosuperior": 0x2070,
+ "foursuperior": 0x2074,
+ "fivesuperior": 0x2075,
+ "sixsuperior": 0x2076,
+ "sevensuperior": 0x2077,
+ "eightsuperior": 0x2078,
+ "ninesuperior": 0x2079,
+ "plussuperior": 0x207A,
+ "equalsuperior": 0x207C,
+ "parenleftsuperior": 0x207D,
+ "parenrightsuperior": 0x207E,
+ "nsuperior": 0x207F,
+ "zeroinferior": 0x2080,
+ "oneinferior": 0x2081,
+ "twoinferior": 0x2082,
+ "threeinferior": 0x2083,
+ "fourinferior": 0x2084,
+ "fiveinferior": 0x2085,
+ "sixinferior": 0x2086,
+ "seveninferior": 0x2087,
+ "eightinferior": 0x2088,
+ "nineinferior": 0x2089,
+ "parenleftinferior": 0x208D,
+ "parenrightinferior": 0x208E,
+ "colonmonetary": 0x20A1,
+ "colonsign": 0x20A1,
+ "cruzeiro": 0x20A2,
+ "franc": 0x20A3,
+ "afii08941": 0x20A4,
+ "lira": 0x20A4,
+ "peseta": 0x20A7,
+ "won": 0x20A9,
+ "afii57636": 0x20AA,
+ "newsheqelsign": 0x20AA,
+ "sheqel": 0x20AA,
+ "sheqelhebrew": 0x20AA,
+ "dong": 0x20AB,
+ "centigrade": 0x2103,
+ "afii61248": 0x2105,
+ "careof": 0x2105,
+ "fahrenheit": 0x2109,
+ "afii61289": 0x2113,
+ "lsquare": 0x2113,
+ "afii61352": 0x2116,
+ "numero": 0x2116,
+ "prescription": 0x211E,
+ "telephone": 0x2121,
+ "Ohm": 0x2126,
+ "Omega": 0x2126,
+ "angstrom": 0x212B,
+ "estimated": 0x212E,
+ "onethird": 0x2153,
+ "twothirds": 0x2154,
+ "oneeighth": 0x215B,
+ "threeeighths": 0x215C,
+ "fiveeighths": 0x215D,
+ "seveneighths": 0x215E,
+ "Oneroman": 0x2160,
+ "Tworoman": 0x2161,
+ "Threeroman": 0x2162,
+ "Fourroman": 0x2163,
+ "Fiveroman": 0x2164,
+ "Sixroman": 0x2165,
+ "Sevenroman": 0x2166,
+ "Eightroman": 0x2167,
+ "Nineroman": 0x2168,
+ "Tenroman": 0x2169,
+ "Elevenroman": 0x216A,
+ "Twelveroman": 0x216B,
+ "oneroman": 0x2170,
+ "tworoman": 0x2171,
+ "threeroman": 0x2172,
+ "fourroman": 0x2173,
+ "fiveroman": 0x2174,
+ "sixroman": 0x2175,
+ "sevenroman": 0x2176,
+ "eightroman": 0x2177,
+ "nineroman": 0x2178,
+ "tenroman": 0x2179,
+ "elevenroman": 0x217A,
+ "twelveroman": 0x217B,
+ "arrowupdn": 0x2195,
+ "arrowupleft": 0x2196,
+ "arrowupright": 0x2197,
+ "arrowdownright": 0x2198,
+ "arrowdownleft": 0x2199,
+ "arrowupdnbse": 0x21A8,
+ "arrowupdownbase": 0x21A8,
+ "harpoonleftbarbup": 0x21BC,
+ "harpoonrightbarbup": 0x21C0,
+ "arrowrightoverleft": 0x21C4,
+ "arrowupleftofdown": 0x21C5,
+ "arrowleftoverright": 0x21C6,
+ "arrowleftdblstroke": 0x21CD,
+ "arrowrightdblstroke": 0x21CF,
+ "pageup": 0x21DE,
+ "pagedown": 0x21DF,
+ "arrowdashleft": 0x21E0,
+ "arrowdashup": 0x21E1,
+ "arrowdashright": 0x21E2,
+ "arrowdashdown": 0x21E3,
+ "arrowtableft": 0x21E4,
+ "arrowtabright": 0x21E5,
+ "arrowleftwhite": 0x21E6,
+ "arrowupwhite": 0x21E7,
+ "arrowrightwhite": 0x21E8,
+ "arrowdownwhite": 0x21E9,
+ "capslock": 0x21EA,
+ "Delta": 0x2206,
+ "increment": 0x2206,
+ "notcontains": 0x220C,
+ "minusplus": 0x2213,
+ "divisionslash": 0x2215,
+ "bulletoperator": 0x2219,
+ "orthogonal": 0x221F,
+ "rightangle": 0x221F,
+ "divides": 0x2223,
+ "parallel": 0x2225,
+ "notparallel": 0x2226,
+ "dblintegral": 0x222C,
+ "contourintegral": 0x222E,
+ "because": 0x2235,
+ "ratio": 0x2236,
+ "proportion": 0x2237,
+ "reversedtilde": 0x223D,
+ "asymptoticallyequal": 0x2243,
+ "allequal": 0x224C,
+ "approaches": 0x2250,
+ "geometricallyequal": 0x2251,
+ "approxequalorimage": 0x2252,
+ "imageorapproximatelyequal": 0x2253,
+ "notidentical": 0x2262,
+ "lessoverequal": 0x2266,
+ "greateroverequal": 0x2267,
+ "muchless": 0x226A,
+ "muchgreater": 0x226B,
+ "notless": 0x226E,
+ "notgreater": 0x226F,
+ "notlessnorequal": 0x2270,
+ "notgreaternorequal": 0x2271,
+ "lessorequivalent": 0x2272,
+ "greaterorequivalent": 0x2273,
+ "lessorgreater": 0x2276,
+ "greaterorless": 0x2277,
+ "notgreaternorless": 0x2279,
+ "precedes": 0x227A,
+ "succeeds": 0x227B,
+ "notprecedes": 0x2280,
+ "notsucceeds": 0x2281,
+ "notsuperset": 0x2285,
+ "subsetnotequal": 0x228A,
+ "supersetnotequal": 0x228B,
+ "minuscircle": 0x2296,
+ "circleot": 0x2299,
+ "tackleft": 0x22A3,
+ "tackdown": 0x22A4,
+ "righttriangle": 0x22BF,
+ "curlyor": 0x22CE,
+ "curlyand": 0x22CF,
+ "lessequalorgreater": 0x22DA,
+ "greaterequalorless": 0x22DB,
+ "ellipsisvertical": 0x22EE,
+ "house": 0x2302,
+ "control": 0x2303,
+ "projective": 0x2305,
+ "logicalnotreversed": 0x2310,
+ "revlogicalnot": 0x2310,
+ "arc": 0x2312,
+ "propellor": 0x2318,
+ "integraltop": 0x2320,
+ "integraltp": 0x2320,
+ "integralbottom": 0x2321,
+ "integralbt": 0x2321,
+ "option": 0x2325,
+ "deleteright": 0x2326,
+ "clear": 0x2327,
+ "deleteleft": 0x232B,
+ "blank": 0x2423,
+ "onecircle": 0x2460,
+ "twocircle": 0x2461,
+ "threecircle": 0x2462,
+ "fourcircle": 0x2463,
+ "fivecircle": 0x2464,
+ "sixcircle": 0x2465,
+ "sevencircle": 0x2466,
+ "eightcircle": 0x2467,
+ "ninecircle": 0x2468,
+ "tencircle": 0x2469,
+ "elevencircle": 0x246A,
+ "twelvecircle": 0x246B,
+ "thirteencircle": 0x246C,
+ "fourteencircle": 0x246D,
+ "fifteencircle": 0x246E,
+ "sixteencircle": 0x246F,
+ "seventeencircle": 0x2470,
+ "eighteencircle": 0x2471,
+ "nineteencircle": 0x2472,
+ "twentycircle": 0x2473,
+ "oneparen": 0x2474,
+ "twoparen": 0x2475,
+ "threeparen": 0x2476,
+ "fourparen": 0x2477,
+ "fiveparen": 0x2478,
+ "sixparen": 0x2479,
+ "sevenparen": 0x247A,
+ "eightparen": 0x247B,
+ "nineparen": 0x247C,
+ "tenparen": 0x247D,
+ "elevenparen": 0x247E,
+ "twelveparen": 0x247F,
+ "thirteenparen": 0x2480,
+ "fourteenparen": 0x2481,
+ "fifteenparen": 0x2482,
+ "sixteenparen": 0x2483,
+ "seventeenparen": 0x2484,
+ "eighteenparen": 0x2485,
+ "nineteenparen": 0x2486,
+ "twentyparen": 0x2487,
+ "oneperiod": 0x2488,
+ "twoperiod": 0x2489,
+ "threeperiod": 0x248A,
+ "fourperiod": 0x248B,
+ "fiveperiod": 0x248C,
+ "sixperiod": 0x248D,
+ "sevenperiod": 0x248E,
+ "eightperiod": 0x248F,
+ "nineperiod": 0x2490,
+ "tenperiod": 0x2491,
+ "elevenperiod": 0x2492,
+ "twelveperiod": 0x2493,
+ "thirteenperiod": 0x2494,
+ "fourteenperiod": 0x2495,
+ "fifteenperiod": 0x2496,
+ "sixteenperiod": 0x2497,
+ "seventeenperiod": 0x2498,
+ "eighteenperiod": 0x2499,
+ "nineteenperiod": 0x249A,
+ "twentyperiod": 0x249B,
+ "aparen": 0x249C,
+ "bparen": 0x249D,
+ "cparen": 0x249E,
+ "dparen": 0x249F,
+ "eparen": 0x24A0,
+ "fparen": 0x24A1,
+ "gparen": 0x24A2,
+ "hparen": 0x24A3,
+ "iparen": 0x24A4,
+ "jparen": 0x24A5,
+ "kparen": 0x24A6,
+ "lparen": 0x24A7,
+ "mparen": 0x24A8,
+ "nparen": 0x24A9,
+ "oparen": 0x24AA,
+ "pparen": 0x24AB,
+ "qparen": 0x24AC,
+ "rparen": 0x24AD,
+ "sparen": 0x24AE,
+ "tparen": 0x24AF,
+ "uparen": 0x24B0,
+ "vparen": 0x24B1,
+ "wparen": 0x24B2,
+ "xparen": 0x24B3,
+ "yparen": 0x24B4,
+ "zparen": 0x24B5,
+ "Acircle": 0x24B6,
+ "Bcircle": 0x24B7,
+ "Ccircle": 0x24B8,
+ "Dcircle": 0x24B9,
+ "Ecircle": 0x24BA,
+ "Fcircle": 0x24BB,
+ "Gcircle": 0x24BC,
+ "Hcircle": 0x24BD,
+ "Icircle": 0x24BE,
+ "Jcircle": 0x24BF,
+ "Kcircle": 0x24C0,
+ "Lcircle": 0x24C1,
+ "Mcircle": 0x24C2,
+ "Ncircle": 0x24C3,
+ "Ocircle": 0x24C4,
+ "Pcircle": 0x24C5,
+ "Qcircle": 0x24C6,
+ "Rcircle": 0x24C7,
+ "Scircle": 0x24C8,
+ "Tcircle": 0x24C9,
+ "Ucircle": 0x24CA,
+ "Vcircle": 0x24CB,
+ "Wcircle": 0x24CC,
+ "Xcircle": 0x24CD,
+ "Ycircle": 0x24CE,
+ "Zcircle": 0x24CF,
+ "acircle": 0x24D0,
+ "bcircle": 0x24D1,
+ "ccircle": 0x24D2,
+ "dcircle": 0x24D3,
+ "ecircle": 0x24D4,
+ "fcircle": 0x24D5,
+ "gcircle": 0x24D6,
+ "hcircle": 0x24D7,
+ "icircle": 0x24D8,
+ "jcircle": 0x24D9,
+ "kcircle": 0x24DA,
+ "lcircle": 0x24DB,
+ "mcircle": 0x24DC,
+ "ncircle": 0x24DD,
+ "ocircle": 0x24DE,
+ "pcircle": 0x24DF,
+ "qcircle": 0x24E0,
+ "rcircle": 0x24E1,
+ "scircle": 0x24E2,
+ "tcircle": 0x24E3,
+ "ucircle": 0x24E4,
+ "vcircle": 0x24E5,
+ "wcircle": 0x24E6,
+ "xcircle": 0x24E7,
+ "ycircle": 0x24E8,
+ "zcircle": 0x24E9,
+ "SF100000": 0x2500,
+ "SF110000": 0x2502,
+ "SF010000": 0x250C,
+ "SF030000": 0x2510,
+ "SF020000": 0x2514,
+ "SF040000": 0x2518,
+ "SF080000": 0x251C,
+ "SF090000": 0x2524,
+ "SF060000": 0x252C,
+ "SF070000": 0x2534,
+ "SF050000": 0x253C,
+ "SF430000": 0x2550,
+ "SF240000": 0x2551,
+ "SF510000": 0x2552,
+ "SF520000": 0x2553,
+ "SF390000": 0x2554,
+ "SF220000": 0x2555,
+ "SF210000": 0x2556,
+ "SF250000": 0x2557,
+ "SF500000": 0x2558,
+ "SF490000": 0x2559,
+ "SF380000": 0x255A,
+ "SF280000": 0x255B,
+ "SF270000": 0x255C,
+ "SF260000": 0x255D,
+ "SF360000": 0x255E,
+ "SF370000": 0x255F,
+ "SF420000": 0x2560,
+ "SF190000": 0x2561,
+ "SF200000": 0x2562,
+ "SF230000": 0x2563,
+ "SF470000": 0x2564,
+ "SF480000": 0x2565,
+ "SF410000": 0x2566,
+ "SF450000": 0x2567,
+ "SF460000": 0x2568,
+ "SF400000": 0x2569,
+ "SF540000": 0x256A,
+ "SF530000": 0x256B,
+ "SF440000": 0x256C,
+ "upblock": 0x2580,
+ "dnblock": 0x2584,
+ "block": 0x2588,
+ "lfblock": 0x258C,
+ "rtblock": 0x2590,
+ "ltshade": 0x2591,
+ "shadelight": 0x2591,
+ "shade": 0x2592,
+ "shademedium": 0x2592,
+ "dkshade": 0x2593,
+ "shadedark": 0x2593,
+ "blacksquare": 0x25A0,
+ "filledbox": 0x25A0,
+ "H22073": 0x25A1,
+ "whitesquare": 0x25A1,
+ "squarewhitewithsmallblack": 0x25A3,
+ "squarehorizontalfill": 0x25A4,
+ "squareverticalfill": 0x25A5,
+ "squareorthogonalcrosshatchfill": 0x25A6,
+ "squareupperlefttolowerrightfill": 0x25A7,
+ "squareupperrighttolowerleftfill": 0x25A8,
+ "squarediagonalcrosshatchfill": 0x25A9,
+ "H18543": 0x25AA,
+ "blacksmallsquare": 0x25AA,
+ "H18551": 0x25AB,
+ "whitesmallsquare": 0x25AB,
+ "blackrectangle": 0x25AC,
+ "filledrect": 0x25AC,
+ "blackuppointingtriangle": 0x25B2,
+ "triagup": 0x25B2,
+ "whiteuppointingtriangle": 0x25B3,
+ "blackuppointingsmalltriangle": 0x25B4,
+ "whiteuppointingsmalltriangle": 0x25B5,
+ "blackrightpointingtriangle": 0x25B6,
+ "whiterightpointingtriangle": 0x25B7,
+ "whiterightpointingsmalltriangle": 0x25B9,
+ "blackrightpointingpointer": 0x25BA,
+ "triagrt": 0x25BA,
+ "blackdownpointingtriangle": 0x25BC,
+ "triagdn": 0x25BC,
+ "whitedownpointingtriangle": 0x25BD,
+ "whitedownpointingsmalltriangle": 0x25BF,
+ "blackleftpointingtriangle": 0x25C0,
+ "whiteleftpointingtriangle": 0x25C1,
+ "whiteleftpointingsmalltriangle": 0x25C3,
+ "blackleftpointingpointer": 0x25C4,
+ "triaglf": 0x25C4,
+ "blackdiamond": 0x25C6,
+ "whitediamond": 0x25C7,
+ "whitediamondcontainingblacksmalldiamond": 0x25C8,
+ "fisheye": 0x25C9,
+ "circle": 0x25CB,
+ "whitecircle": 0x25CB,
+ "dottedcircle": 0x25CC,
+ "bullseye": 0x25CE,
+ "H18533": 0x25CF,
+ "blackcircle": 0x25CF,
+ "circlewithlefthalfblack": 0x25D0,
+ "circlewithrighthalfblack": 0x25D1,
+ "bulletinverse": 0x25D8,
+ "invbullet": 0x25D8,
+ "invcircle": 0x25D9,
+ "whitecircleinverse": 0x25D9,
+ "blacklowerrighttriangle": 0x25E2,
+ "blacklowerlefttriangle": 0x25E3,
+ "blackupperlefttriangle": 0x25E4,
+ "blackupperrighttriangle": 0x25E5,
+ "openbullet": 0x25E6,
+ "whitebullet": 0x25E6,
+ "largecircle": 0x25EF,
+ "blackstar": 0x2605,
+ "whitestar": 0x2606,
+ "telephoneblack": 0x260E,
+ "whitetelephone": 0x260F,
+ "pointingindexleftwhite": 0x261C,
+ "pointingindexupwhite": 0x261D,
+ "pointingindexrightwhite": 0x261E,
+ "pointingindexdownwhite": 0x261F,
+ "yinyang": 0x262F,
+ "smileface": 0x263A,
+ "whitesmilingface": 0x263A,
+ "blacksmilingface": 0x263B,
+ "invsmileface": 0x263B,
+ "compass": 0x263C,
+ "sun": 0x263C,
+ "female": 0x2640,
+ "venus": 0x2640,
+ "earth": 0x2641,
+ "male": 0x2642,
+ "mars": 0x2642,
+ "heartsuitwhite": 0x2661,
+ "diamondsuitwhite": 0x2662,
+ "spadesuitwhite": 0x2664,
+ "clubsuitwhite": 0x2667,
+ "hotsprings": 0x2668,
+ "quarternote": 0x2669,
+ "musicalnote": 0x266A,
+ "eighthnotebeamed": 0x266B,
+ "musicalnotedbl": 0x266B,
+ "beamedsixteenthnotes": 0x266C,
+ "musicflatsign": 0x266D,
+ "musicsharpsign": 0x266F,
+ "checkmark": 0x2713,
+ "onecircleinversesansserif": 0x278A,
+ "twocircleinversesansserif": 0x278B,
+ "threecircleinversesansserif": 0x278C,
+ "fourcircleinversesansserif": 0x278D,
+ "fivecircleinversesansserif": 0x278E,
+ "sixcircleinversesansserif": 0x278F,
+ "sevencircleinversesansserif": 0x2790,
+ "eightcircleinversesansserif": 0x2791,
+ "ninecircleinversesansserif": 0x2792,
+ "arrowrightheavy": 0x279E,
+ "ideographicspace": 0x3000,
+ "ideographiccomma": 0x3001,
+ "ideographicperiod": 0x3002,
+ "dittomark": 0x3003,
+ "jis": 0x3004,
+ "ideographiciterationmark": 0x3005,
+ "ideographicclose": 0x3006,
+ "ideographiczero": 0x3007,
+ "anglebracketleft": 0x3008,
+ "anglebracketright": 0x3009,
+ "dblanglebracketleft": 0x300A,
+ "dblanglebracketright": 0x300B,
+ "cornerbracketleft": 0x300C,
+ "cornerbracketright": 0x300D,
+ "whitecornerbracketleft": 0x300E,
+ "whitecornerbracketright": 0x300F,
+ "blacklenticularbracketleft": 0x3010,
+ "blacklenticularbracketright": 0x3011,
+ "postalmark": 0x3012,
+ "getamark": 0x3013,
+ "tortoiseshellbracketleft": 0x3014,
+ "tortoiseshellbracketright": 0x3015,
+ "whitelenticularbracketleft": 0x3016,
+ "whitelenticularbracketright": 0x3017,
+ "whitetortoiseshellbracketleft": 0x3018,
+ "whitetortoiseshellbracketright": 0x3019,
+ "wavedash": 0x301C,
+ "quotedblprimereversed": 0x301D,
+ "quotedblprime": 0x301E,
+ "postalmarkface": 0x3020,
+ "onehangzhou": 0x3021,
+ "twohangzhou": 0x3022,
+ "threehangzhou": 0x3023,
+ "fourhangzhou": 0x3024,
+ "fivehangzhou": 0x3025,
+ "sixhangzhou": 0x3026,
+ "sevenhangzhou": 0x3027,
+ "eighthangzhou": 0x3028,
+ "ninehangzhou": 0x3029,
+ "circlepostalmark": 0x3036,
+ "asmallhiragana": 0x3041,
+ "ahiragana": 0x3042,
+ "ismallhiragana": 0x3043,
+ "ihiragana": 0x3044,
+ "usmallhiragana": 0x3045,
+ "uhiragana": 0x3046,
+ "esmallhiragana": 0x3047,
+ "ehiragana": 0x3048,
+ "osmallhiragana": 0x3049,
+ "ohiragana": 0x304A,
+ "kahiragana": 0x304B,
+ "gahiragana": 0x304C,
+ "kihiragana": 0x304D,
+ "gihiragana": 0x304E,
+ "kuhiragana": 0x304F,
+ "guhiragana": 0x3050,
+ "kehiragana": 0x3051,
+ "gehiragana": 0x3052,
+ "kohiragana": 0x3053,
+ "gohiragana": 0x3054,
+ "sahiragana": 0x3055,
+ "zahiragana": 0x3056,
+ "sihiragana": 0x3057,
+ "zihiragana": 0x3058,
+ "suhiragana": 0x3059,
+ "zuhiragana": 0x305A,
+ "sehiragana": 0x305B,
+ "zehiragana": 0x305C,
+ "sohiragana": 0x305D,
+ "zohiragana": 0x305E,
+ "tahiragana": 0x305F,
+ "dahiragana": 0x3060,
+ "tihiragana": 0x3061,
+ "dihiragana": 0x3062,
+ "tusmallhiragana": 0x3063,
+ "tuhiragana": 0x3064,
+ "duhiragana": 0x3065,
+ "tehiragana": 0x3066,
+ "dehiragana": 0x3067,
+ "tohiragana": 0x3068,
+ "dohiragana": 0x3069,
+ "nahiragana": 0x306A,
+ "nihiragana": 0x306B,
+ "nuhiragana": 0x306C,
+ "nehiragana": 0x306D,
+ "nohiragana": 0x306E,
+ "hahiragana": 0x306F,
+ "bahiragana": 0x3070,
+ "pahiragana": 0x3071,
+ "hihiragana": 0x3072,
+ "bihiragana": 0x3073,
+ "pihiragana": 0x3074,
+ "huhiragana": 0x3075,
+ "buhiragana": 0x3076,
+ "puhiragana": 0x3077,
+ "hehiragana": 0x3078,
+ "behiragana": 0x3079,
+ "pehiragana": 0x307A,
+ "hohiragana": 0x307B,
+ "bohiragana": 0x307C,
+ "pohiragana": 0x307D,
+ "mahiragana": 0x307E,
+ "mihiragana": 0x307F,
+ "muhiragana": 0x3080,
+ "mehiragana": 0x3081,
+ "mohiragana": 0x3082,
+ "yasmallhiragana": 0x3083,
+ "yahiragana": 0x3084,
+ "yusmallhiragana": 0x3085,
+ "yuhiragana": 0x3086,
+ "yosmallhiragana": 0x3087,
+ "yohiragana": 0x3088,
+ "rahiragana": 0x3089,
+ "rihiragana": 0x308A,
+ "ruhiragana": 0x308B,
+ "rehiragana": 0x308C,
+ "rohiragana": 0x308D,
+ "wasmallhiragana": 0x308E,
+ "wahiragana": 0x308F,
+ "wihiragana": 0x3090,
+ "wehiragana": 0x3091,
+ "wohiragana": 0x3092,
+ "nhiragana": 0x3093,
+ "vuhiragana": 0x3094,
+ "voicedmarkkana": 0x309B,
+ "semivoicedmarkkana": 0x309C,
+ "iterationhiragana": 0x309D,
+ "voicediterationhiragana": 0x309E,
+ "asmallkatakana": 0x30A1,
+ "akatakana": 0x30A2,
+ "ismallkatakana": 0x30A3,
+ "ikatakana": 0x30A4,
+ "usmallkatakana": 0x30A5,
+ "ukatakana": 0x30A6,
+ "esmallkatakana": 0x30A7,
+ "ekatakana": 0x30A8,
+ "osmallkatakana": 0x30A9,
+ "okatakana": 0x30AA,
+ "kakatakana": 0x30AB,
+ "gakatakana": 0x30AC,
+ "kikatakana": 0x30AD,
+ "gikatakana": 0x30AE,
+ "kukatakana": 0x30AF,
+ "gukatakana": 0x30B0,
+ "kekatakana": 0x30B1,
+ "gekatakana": 0x30B2,
+ "kokatakana": 0x30B3,
+ "gokatakana": 0x30B4,
+ "sakatakana": 0x30B5,
+ "zakatakana": 0x30B6,
+ "sikatakana": 0x30B7,
+ "zikatakana": 0x30B8,
+ "sukatakana": 0x30B9,
+ "zukatakana": 0x30BA,
+ "sekatakana": 0x30BB,
+ "zekatakana": 0x30BC,
+ "sokatakana": 0x30BD,
+ "zokatakana": 0x30BE,
+ "takatakana": 0x30BF,
+ "dakatakana": 0x30C0,
+ "tikatakana": 0x30C1,
+ "dikatakana": 0x30C2,
+ "tusmallkatakana": 0x30C3,
+ "tukatakana": 0x30C4,
+ "dukatakana": 0x30C5,
+ "tekatakana": 0x30C6,
+ "dekatakana": 0x30C7,
+ "tokatakana": 0x30C8,
+ "dokatakana": 0x30C9,
+ "nakatakana": 0x30CA,
+ "nikatakana": 0x30CB,
+ "nukatakana": 0x30CC,
+ "nekatakana": 0x30CD,
+ "nokatakana": 0x30CE,
+ "hakatakana": 0x30CF,
+ "bakatakana": 0x30D0,
+ "pakatakana": 0x30D1,
+ "hikatakana": 0x30D2,
+ "bikatakana": 0x30D3,
+ "pikatakana": 0x30D4,
+ "hukatakana": 0x30D5,
+ "bukatakana": 0x30D6,
+ "pukatakana": 0x30D7,
+ "hekatakana": 0x30D8,
+ "bekatakana": 0x30D9,
+ "pekatakana": 0x30DA,
+ "hokatakana": 0x30DB,
+ "bokatakana": 0x30DC,
+ "pokatakana": 0x30DD,
+ "makatakana": 0x30DE,
+ "mikatakana": 0x30DF,
+ "mukatakana": 0x30E0,
+ "mekatakana": 0x30E1,
+ "mokatakana": 0x30E2,
+ "yasmallkatakana": 0x30E3,
+ "yakatakana": 0x30E4,
+ "yusmallkatakana": 0x30E5,
+ "yukatakana": 0x30E6,
+ "yosmallkatakana": 0x30E7,
+ "yokatakana": 0x30E8,
+ "rakatakana": 0x30E9,
+ "rikatakana": 0x30EA,
+ "rukatakana": 0x30EB,
+ "rekatakana": 0x30EC,
+ "rokatakana": 0x30ED,
+ "wasmallkatakana": 0x30EE,
+ "wakatakana": 0x30EF,
+ "wikatakana": 0x30F0,
+ "wekatakana": 0x30F1,
+ "wokatakana": 0x30F2,
+ "nkatakana": 0x30F3,
+ "vukatakana": 0x30F4,
+ "kasmallkatakana": 0x30F5,
+ "kesmallkatakana": 0x30F6,
+ "vakatakana": 0x30F7,
+ "vikatakana": 0x30F8,
+ "vekatakana": 0x30F9,
+ "vokatakana": 0x30FA,
+ "dotkatakana": 0x30FB,
+ "prolongedkana": 0x30FC,
+ "iterationkatakana": 0x30FD,
+ "voicediterationkatakana": 0x30FE,
+ "bbopomofo": 0x3105,
+ "pbopomofo": 0x3106,
+ "mbopomofo": 0x3107,
+ "fbopomofo": 0x3108,
+ "dbopomofo": 0x3109,
+ "tbopomofo": 0x310A,
+ "nbopomofo": 0x310B,
+ "lbopomofo": 0x310C,
+ "gbopomofo": 0x310D,
+ "kbopomofo": 0x310E,
+ "hbopomofo": 0x310F,
+ "jbopomofo": 0x3110,
+ "qbopomofo": 0x3111,
+ "xbopomofo": 0x3112,
+ "zhbopomofo": 0x3113,
+ "chbopomofo": 0x3114,
+ "shbopomofo": 0x3115,
+ "rbopomofo": 0x3116,
+ "zbopomofo": 0x3117,
+ "cbopomofo": 0x3118,
+ "sbopomofo": 0x3119,
+ "abopomofo": 0x311A,
+ "obopomofo": 0x311B,
+ "ebopomofo": 0x311C,
+ "ehbopomofo": 0x311D,
+ "aibopomofo": 0x311E,
+ "eibopomofo": 0x311F,
+ "aubopomofo": 0x3120,
+ "oubopomofo": 0x3121,
+ "anbopomofo": 0x3122,
+ "enbopomofo": 0x3123,
+ "angbopomofo": 0x3124,
+ "engbopomofo": 0x3125,
+ "erbopomofo": 0x3126,
+ "ibopomofo": 0x3127,
+ "ubopomofo": 0x3128,
+ "iubopomofo": 0x3129,
+ "kiyeokkorean": 0x3131,
+ "ssangkiyeokkorean": 0x3132,
+ "kiyeoksioskorean": 0x3133,
+ "nieunkorean": 0x3134,
+ "nieuncieuckorean": 0x3135,
+ "nieunhieuhkorean": 0x3136,
+ "tikeutkorean": 0x3137,
+ "ssangtikeutkorean": 0x3138,
+ "rieulkorean": 0x3139,
+ "rieulkiyeokkorean": 0x313A,
+ "rieulmieumkorean": 0x313B,
+ "rieulpieupkorean": 0x313C,
+ "rieulsioskorean": 0x313D,
+ "rieulthieuthkorean": 0x313E,
+ "rieulphieuphkorean": 0x313F,
+ "rieulhieuhkorean": 0x3140,
+ "mieumkorean": 0x3141,
+ "pieupkorean": 0x3142,
+ "ssangpieupkorean": 0x3143,
+ "pieupsioskorean": 0x3144,
+ "sioskorean": 0x3145,
+ "ssangsioskorean": 0x3146,
+ "ieungkorean": 0x3147,
+ "cieuckorean": 0x3148,
+ "ssangcieuckorean": 0x3149,
+ "chieuchkorean": 0x314A,
+ "khieukhkorean": 0x314B,
+ "thieuthkorean": 0x314C,
+ "phieuphkorean": 0x314D,
+ "hieuhkorean": 0x314E,
+ "akorean": 0x314F,
+ "aekorean": 0x3150,
+ "yakorean": 0x3151,
+ "yaekorean": 0x3152,
+ "eokorean": 0x3153,
+ "ekorean": 0x3154,
+ "yeokorean": 0x3155,
+ "yekorean": 0x3156,
+ "okorean": 0x3157,
+ "wakorean": 0x3158,
+ "waekorean": 0x3159,
+ "oekorean": 0x315A,
+ "yokorean": 0x315B,
+ "ukorean": 0x315C,
+ "weokorean": 0x315D,
+ "wekorean": 0x315E,
+ "wikorean": 0x315F,
+ "yukorean": 0x3160,
+ "eukorean": 0x3161,
+ "yikorean": 0x3162,
+ "ikorean": 0x3163,
+ "hangulfiller": 0x3164,
+ "ssangnieunkorean": 0x3165,
+ "nieuntikeutkorean": 0x3166,
+ "nieunsioskorean": 0x3167,
+ "nieunpansioskorean": 0x3168,
+ "rieulkiyeoksioskorean": 0x3169,
+ "rieultikeutkorean": 0x316A,
+ "rieulpieupsioskorean": 0x316B,
+ "rieulpansioskorean": 0x316C,
+ "rieulyeorinhieuhkorean": 0x316D,
+ "mieumpieupkorean": 0x316E,
+ "mieumsioskorean": 0x316F,
+ "mieumpansioskorean": 0x3170,
+ "kapyeounmieumkorean": 0x3171,
+ "pieupkiyeokkorean": 0x3172,
+ "pieuptikeutkorean": 0x3173,
+ "pieupsioskiyeokkorean": 0x3174,
+ "pieupsiostikeutkorean": 0x3175,
+ "pieupcieuckorean": 0x3176,
+ "pieupthieuthkorean": 0x3177,
+ "kapyeounpieupkorean": 0x3178,
+ "kapyeounssangpieupkorean": 0x3179,
+ "sioskiyeokkorean": 0x317A,
+ "siosnieunkorean": 0x317B,
+ "siostikeutkorean": 0x317C,
+ "siospieupkorean": 0x317D,
+ "sioscieuckorean": 0x317E,
+ "pansioskorean": 0x317F,
+ "ssangieungkorean": 0x3180,
+ "yesieungkorean": 0x3181,
+ "yesieungsioskorean": 0x3182,
+ "yesieungpansioskorean": 0x3183,
+ "kapyeounphieuphkorean": 0x3184,
+ "ssanghieuhkorean": 0x3185,
+ "yeorinhieuhkorean": 0x3186,
+ "yoyakorean": 0x3187,
+ "yoyaekorean": 0x3188,
+ "yoikorean": 0x3189,
+ "yuyeokorean": 0x318A,
+ "yuyekorean": 0x318B,
+ "yuikorean": 0x318C,
+ "araeakorean": 0x318D,
+ "araeaekorean": 0x318E,
+ "kiyeokparenkorean": 0x3200,
+ "nieunparenkorean": 0x3201,
+ "tikeutparenkorean": 0x3202,
+ "rieulparenkorean": 0x3203,
+ "mieumparenkorean": 0x3204,
+ "pieupparenkorean": 0x3205,
+ "siosparenkorean": 0x3206,
+ "ieungparenkorean": 0x3207,
+ "cieucparenkorean": 0x3208,
+ "chieuchparenkorean": 0x3209,
+ "khieukhparenkorean": 0x320A,
+ "thieuthparenkorean": 0x320B,
+ "phieuphparenkorean": 0x320C,
+ "hieuhparenkorean": 0x320D,
+ "kiyeokaparenkorean": 0x320E,
+ "nieunaparenkorean": 0x320F,
+ "tikeutaparenkorean": 0x3210,
+ "rieulaparenkorean": 0x3211,
+ "mieumaparenkorean": 0x3212,
+ "pieupaparenkorean": 0x3213,
+ "siosaparenkorean": 0x3214,
+ "ieungaparenkorean": 0x3215,
+ "cieucaparenkorean": 0x3216,
+ "chieuchaparenkorean": 0x3217,
+ "khieukhaparenkorean": 0x3218,
+ "thieuthaparenkorean": 0x3219,
+ "phieuphaparenkorean": 0x321A,
+ "hieuhaparenkorean": 0x321B,
+ "cieucuparenkorean": 0x321C,
+ "oneideographicparen": 0x3220,
+ "twoideographicparen": 0x3221,
+ "threeideographicparen": 0x3222,
+ "fourideographicparen": 0x3223,
+ "fiveideographicparen": 0x3224,
+ "sixideographicparen": 0x3225,
+ "sevenideographicparen": 0x3226,
+ "eightideographicparen": 0x3227,
+ "nineideographicparen": 0x3228,
+ "tenideographicparen": 0x3229,
+ "ideographicmoonparen": 0x322A,
+ "ideographicfireparen": 0x322B,
+ "ideographicwaterparen": 0x322C,
+ "ideographicwoodparen": 0x322D,
+ "ideographicmetalparen": 0x322E,
+ "ideographicearthparen": 0x322F,
+ "ideographicsunparen": 0x3230,
+ "ideographicstockparen": 0x3231,
+ "ideographichaveparen": 0x3232,
+ "ideographicsocietyparen": 0x3233,
+ "ideographicnameparen": 0x3234,
+ "ideographicspecialparen": 0x3235,
+ "ideographicfinancialparen": 0x3236,
+ "ideographiccongratulationparen": 0x3237,
+ "ideographiclaborparen": 0x3238,
+ "ideographicrepresentparen": 0x3239,
+ "ideographiccallparen": 0x323A,
+ "ideographicstudyparen": 0x323B,
+ "ideographicsuperviseparen": 0x323C,
+ "ideographicenterpriseparen": 0x323D,
+ "ideographicresourceparen": 0x323E,
+ "ideographicallianceparen": 0x323F,
+ "ideographicfestivalparen": 0x3240,
+ "ideographicselfparen": 0x3242,
+ "ideographicreachparen": 0x3243,
+ "kiyeokcirclekorean": 0x3260,
+ "nieuncirclekorean": 0x3261,
+ "tikeutcirclekorean": 0x3262,
+ "rieulcirclekorean": 0x3263,
+ "mieumcirclekorean": 0x3264,
+ "pieupcirclekorean": 0x3265,
+ "sioscirclekorean": 0x3266,
+ "ieungcirclekorean": 0x3267,
+ "cieuccirclekorean": 0x3268,
+ "chieuchcirclekorean": 0x3269,
+ "khieukhcirclekorean": 0x326A,
+ "thieuthcirclekorean": 0x326B,
+ "phieuphcirclekorean": 0x326C,
+ "hieuhcirclekorean": 0x326D,
+ "kiyeokacirclekorean": 0x326E,
+ "nieunacirclekorean": 0x326F,
+ "tikeutacirclekorean": 0x3270,
+ "rieulacirclekorean": 0x3271,
+ "mieumacirclekorean": 0x3272,
+ "pieupacirclekorean": 0x3273,
+ "siosacirclekorean": 0x3274,
+ "ieungacirclekorean": 0x3275,
+ "cieucacirclekorean": 0x3276,
+ "chieuchacirclekorean": 0x3277,
+ "khieukhacirclekorean": 0x3278,
+ "thieuthacirclekorean": 0x3279,
+ "phieuphacirclekorean": 0x327A,
+ "hieuhacirclekorean": 0x327B,
+ "koreanstandardsymbol": 0x327F,
+ "ideographmooncircle": 0x328A,
+ "ideographfirecircle": 0x328B,
+ "ideographwatercircle": 0x328C,
+ "ideographwoodcircle": 0x328D,
+ "ideographmetalcircle": 0x328E,
+ "ideographearthcircle": 0x328F,
+ "ideographsuncircle": 0x3290,
+ "ideographnamecircle": 0x3294,
+ "ideographicfinancialcircle": 0x3296,
+ "ideographiclaborcircle": 0x3298,
+ "ideographicsecretcircle": 0x3299,
+ "ideographicexcellentcircle": 0x329D,
+ "ideographicprintcircle": 0x329E,
+ "ideographiccorrectcircle": 0x32A3,
+ "ideographichighcircle": 0x32A4,
+ "ideographiccentrecircle": 0x32A5,
+ "ideographiclowcircle": 0x32A6,
+ "ideographicleftcircle": 0x32A7,
+ "ideographicrightcircle": 0x32A8,
+ "ideographicmedicinecircle": 0x32A9,
+ "apaatosquare": 0x3300,
+ "aarusquare": 0x3303,
+ "intisquare": 0x3305,
+ "karoriisquare": 0x330D,
+ "kirosquare": 0x3314,
+ "kiroguramusquare": 0x3315,
+ "kiromeetorusquare": 0x3316,
+ "guramusquare": 0x3318,
+ "kooposquare": 0x331E,
+ "sentisquare": 0x3322,
+ "sentosquare": 0x3323,
+ "dorusquare": 0x3326,
+ "tonsquare": 0x3327,
+ "haitusquare": 0x332A,
+ "paasentosquare": 0x332B,
+ "birusquare": 0x3331,
+ "huiitosquare": 0x3333,
+ "hekutaarusquare": 0x3336,
+ "herutusquare": 0x3339,
+ "peezisquare": 0x333B,
+ "hoonsquare": 0x3342,
+ "mansyonsquare": 0x3347,
+ "mirisquare": 0x3349,
+ "miribaarusquare": 0x334A,
+ "meetorusquare": 0x334D,
+ "yaadosquare": 0x334E,
+ "rittorusquare": 0x3351,
+ "wattosquare": 0x3357,
+ "heiseierasquare": 0x337B,
+ "syouwaerasquare": 0x337C,
+ "taisyouerasquare": 0x337D,
+ "meizierasquare": 0x337E,
+ "corporationsquare": 0x337F,
+ "paampssquare": 0x3380,
+ "nasquare": 0x3381,
+ "muasquare": 0x3382,
+ "masquare": 0x3383,
+ "kasquare": 0x3384,
+ "KBsquare": 0x3385,
+ "MBsquare": 0x3386,
+ "GBsquare": 0x3387,
+ "calsquare": 0x3388,
+ "kcalsquare": 0x3389,
+ "pfsquare": 0x338A,
+ "nfsquare": 0x338B,
+ "mufsquare": 0x338C,
+ "mugsquare": 0x338D,
+ "squaremg": 0x338E,
+ "squarekg": 0x338F,
+ "Hzsquare": 0x3390,
+ "khzsquare": 0x3391,
+ "mhzsquare": 0x3392,
+ "ghzsquare": 0x3393,
+ "thzsquare": 0x3394,
+ "mulsquare": 0x3395,
+ "mlsquare": 0x3396,
+ "dlsquare": 0x3397,
+ "klsquare": 0x3398,
+ "fmsquare": 0x3399,
+ "nmsquare": 0x339A,
+ "mumsquare": 0x339B,
+ "squaremm": 0x339C,
+ "squarecm": 0x339D,
+ "squarekm": 0x339E,
+ "mmsquaredsquare": 0x339F,
+ "cmsquaredsquare": 0x33A0,
+ "squaremsquared": 0x33A1,
+ "kmsquaredsquare": 0x33A2,
+ "mmcubedsquare": 0x33A3,
+ "cmcubedsquare": 0x33A4,
+ "mcubedsquare": 0x33A5,
+ "kmcubedsquare": 0x33A6,
+ "moverssquare": 0x33A7,
+ "moverssquaredsquare": 0x33A8,
+ "pasquare": 0x33A9,
+ "kpasquare": 0x33AA,
+ "mpasquare": 0x33AB,
+ "gpasquare": 0x33AC,
+ "radsquare": 0x33AD,
+ "radoverssquare": 0x33AE,
+ "radoverssquaredsquare": 0x33AF,
+ "pssquare": 0x33B0,
+ "nssquare": 0x33B1,
+ "mussquare": 0x33B2,
+ "mssquare": 0x33B3,
+ "pvsquare": 0x33B4,
+ "nvsquare": 0x33B5,
+ "muvsquare": 0x33B6,
+ "mvsquare": 0x33B7,
+ "kvsquare": 0x33B8,
+ "mvmegasquare": 0x33B9,
+ "pwsquare": 0x33BA,
+ "nwsquare": 0x33BB,
+ "muwsquare": 0x33BC,
+ "mwsquare": 0x33BD,
+ "kwsquare": 0x33BE,
+ "mwmegasquare": 0x33BF,
+ "kohmsquare": 0x33C0,
+ "mohmsquare": 0x33C1,
+ "amsquare": 0x33C2,
+ "bqsquare": 0x33C3,
+ "squarecc": 0x33C4,
+ "cdsquare": 0x33C5,
+ "coverkgsquare": 0x33C6,
+ "cosquare": 0x33C7,
+ "dbsquare": 0x33C8,
+ "gysquare": 0x33C9,
+ "hasquare": 0x33CA,
+ "HPsquare": 0x33CB,
+ "KKsquare": 0x33CD,
+ "squarekmcapital": 0x33CE,
+ "ktsquare": 0x33CF,
+ "lmsquare": 0x33D0,
+ "squareln": 0x33D1,
+ "squarelog": 0x33D2,
+ "lxsquare": 0x33D3,
+ "mbsquare": 0x33D4,
+ "squaremil": 0x33D5,
+ "molsquare": 0x33D6,
+ "pmsquare": 0x33D8,
+ "srsquare": 0x33DB,
+ "svsquare": 0x33DC,
+ "wbsquare": 0x33DD,
+ "twentyhangzhou": 0x5344,
+ "dotlessj": 0xF6BE,
+ "LL": 0xF6BF,
+ "ll": 0xF6C0,
+ "commaaccent": 0xF6C3,
+ "afii10063": 0xF6C4,
+ "afii10064": 0xF6C5,
+ "afii10192": 0xF6C6,
+ "afii10831": 0xF6C7,
+ "afii10832": 0xF6C8,
+ "Acute": 0xF6C9,
+ "Caron": 0xF6CA,
+ "Dieresis": 0xF6CB,
+ "DieresisAcute": 0xF6CC,
+ "DieresisGrave": 0xF6CD,
+ "Grave": 0xF6CE,
+ "Hungarumlaut": 0xF6CF,
+ "Macron": 0xF6D0,
+ "cyrBreve": 0xF6D1,
+ "cyrFlex": 0xF6D2,
+ "dblGrave": 0xF6D3,
+ "cyrbreve": 0xF6D4,
+ "cyrflex": 0xF6D5,
+ "dblgrave": 0xF6D6,
+ "dieresisacute": 0xF6D7,
+ "dieresisgrave": 0xF6D8,
+ "copyrightserif": 0xF6D9,
+ "registerserif": 0xF6DA,
+ "trademarkserif": 0xF6DB,
+ "onefitted": 0xF6DC,
+ "rupiah": 0xF6DD,
+ "threequartersemdash": 0xF6DE,
+ "centinferior": 0xF6DF,
+ "centsuperior": 0xF6E0,
+ "commainferior": 0xF6E1,
+ "commasuperior": 0xF6E2,
+ "dollarinferior": 0xF6E3,
+ "dollarsuperior": 0xF6E4,
+ "hypheninferior": 0xF6E5,
+ "hyphensuperior": 0xF6E6,
+ "periodinferior": 0xF6E7,
+ "periodsuperior": 0xF6E8,
+ "asuperior": 0xF6E9,
+ "bsuperior": 0xF6EA,
+ "dsuperior": 0xF6EB,
+ "esuperior": 0xF6EC,
+ "isuperior": 0xF6ED,
+ "lsuperior": 0xF6EE,
+ "msuperior": 0xF6EF,
+ "osuperior": 0xF6F0,
+ "rsuperior": 0xF6F1,
+ "ssuperior": 0xF6F2,
+ "tsuperior": 0xF6F3,
+ "Brevesmall": 0xF6F4,
+ "Caronsmall": 0xF6F5,
+ "Circumflexsmall": 0xF6F6,
+ "Dotaccentsmall": 0xF6F7,
+ "Hungarumlautsmall": 0xF6F8,
+ "Lslashsmall": 0xF6F9,
+ "OEsmall": 0xF6FA,
+ "Ogoneksmall": 0xF6FB,
+ "Ringsmall": 0xF6FC,
+ "Scaronsmall": 0xF6FD,
+ "Tildesmall": 0xF6FE,
+ "Zcaronsmall": 0xF6FF,
+ "exclamsmall": 0xF721,
+ "dollaroldstyle": 0xF724,
+ "ampersandsmall": 0xF726,
+ "zerooldstyle": 0xF730,
+ "oneoldstyle": 0xF731,
+ "twooldstyle": 0xF732,
+ "threeoldstyle": 0xF733,
+ "fouroldstyle": 0xF734,
+ "fiveoldstyle": 0xF735,
+ "sixoldstyle": 0xF736,
+ "sevenoldstyle": 0xF737,
+ "eightoldstyle": 0xF738,
+ "nineoldstyle": 0xF739,
+ "questionsmall": 0xF73F,
+ "Gravesmall": 0xF760,
+ "Asmall": 0xF761,
+ "Bsmall": 0xF762,
+ "Csmall": 0xF763,
+ "Dsmall": 0xF764,
+ "Esmall": 0xF765,
+ "Fsmall": 0xF766,
+ "Gsmall": 0xF767,
+ "Hsmall": 0xF768,
+ "Ismall": 0xF769,
+ "Jsmall": 0xF76A,
+ "Ksmall": 0xF76B,
+ "Lsmall": 0xF76C,
+ "Msmall": 0xF76D,
+ "Nsmall": 0xF76E,
+ "Osmall": 0xF76F,
+ "Psmall": 0xF770,
+ "Qsmall": 0xF771,
+ "Rsmall": 0xF772,
+ "Ssmall": 0xF773,
+ "Tsmall": 0xF774,
+ "Usmall": 0xF775,
+ "Vsmall": 0xF776,
+ "Wsmall": 0xF777,
+ "Xsmall": 0xF778,
+ "Ysmall": 0xF779,
+ "Zsmall": 0xF77A,
+ "exclamdownsmall": 0xF7A1,
+ "centoldstyle": 0xF7A2,
+ "Dieresissmall": 0xF7A8,
+ "Macronsmall": 0xF7AF,
+ "Acutesmall": 0xF7B4,
+ "Cedillasmall": 0xF7B8,
+ "questiondownsmall": 0xF7BF,
+ "Agravesmall": 0xF7E0,
+ "Aacutesmall": 0xF7E1,
+ "Acircumflexsmall": 0xF7E2,
+ "Atildesmall": 0xF7E3,
+ "Adieresissmall": 0xF7E4,
+ "Aringsmall": 0xF7E5,
+ "AEsmall": 0xF7E6,
+ "Ccedillasmall": 0xF7E7,
+ "Egravesmall": 0xF7E8,
+ "Eacutesmall": 0xF7E9,
+ "Ecircumflexsmall": 0xF7EA,
+ "Edieresissmall": 0xF7EB,
+ "Igravesmall": 0xF7EC,
+ "Iacutesmall": 0xF7ED,
+ "Icircumflexsmall": 0xF7EE,
+ "Idieresissmall": 0xF7EF,
+ "Ethsmall": 0xF7F0,
+ "Ntildesmall": 0xF7F1,
+ "Ogravesmall": 0xF7F2,
+ "Oacutesmall": 0xF7F3,
+ "Ocircumflexsmall": 0xF7F4,
+ "Otildesmall": 0xF7F5,
+ "Odieresissmall": 0xF7F6,
+ "Oslashsmall": 0xF7F8,
+ "Ugravesmall": 0xF7F9,
+ "Uacutesmall": 0xF7FA,
+ "Ucircumflexsmall": 0xF7FB,
+ "Udieresissmall": 0xF7FC,
+ "Yacutesmall": 0xF7FD,
+ "Thornsmall": 0xF7FE,
+ "Ydieresissmall": 0xF7FF,
+ "maihanakatleftthai": 0xF884,
+ "saraileftthai": 0xF885,
+ "saraiileftthai": 0xF886,
+ "saraueleftthai": 0xF887,
+ "saraueeleftthai": 0xF888,
+ "maitaikhuleftthai": 0xF889,
+ "maiekupperleftthai": 0xF88A,
+ "maieklowrightthai": 0xF88B,
+ "maieklowleftthai": 0xF88C,
+ "maithoupperleftthai": 0xF88D,
+ "maitholowrightthai": 0xF88E,
+ "maitholowleftthai": 0xF88F,
+ "maitriupperleftthai": 0xF890,
+ "maitrilowrightthai": 0xF891,
+ "maitrilowleftthai": 0xF892,
+ "maichattawaupperleftthai": 0xF893,
+ "maichattawalowrightthai": 0xF894,
+ "maichattawalowleftthai": 0xF895,
+ "thanthakhatupperleftthai": 0xF896,
+ "thanthakhatlowrightthai": 0xF897,
+ "thanthakhatlowleftthai": 0xF898,
+ "nikhahitleftthai": 0xF899,
+ "radicalex": 0xF8E5,
+ "arrowvertex": 0xF8E6,
+ "arrowhorizex": 0xF8E7,
+ "registersans": 0xF8E8,
+ "copyrightsans": 0xF8E9,
+ "trademarksans": 0xF8EA,
+ "parenlefttp": 0xF8EB,
+ "parenleftex": 0xF8EC,
+ "parenleftbt": 0xF8ED,
+ "bracketlefttp": 0xF8EE,
+ "bracketleftex": 0xF8EF,
+ "bracketleftbt": 0xF8F0,
+ "bracelefttp": 0xF8F1,
+ "braceleftmid": 0xF8F2,
+ "braceleftbt": 0xF8F3,
+ "braceex": 0xF8F4,
+ "integralex": 0xF8F5,
+ "parenrighttp": 0xF8F6,
+ "parenrightex": 0xF8F7,
+ "parenrightbt": 0xF8F8,
+ "bracketrighttp": 0xF8F9,
+ "bracketrightex": 0xF8FA,
+ "bracketrightbt": 0xF8FB,
+ "bracerighttp": 0xF8FC,
+ "bracerightmid": 0xF8FD,
+ "bracerightbt": 0xF8FE,
+ "apple": 0xF8FF,
+ "ff": 0xFB00,
+ "fi": 0xFB01,
+ "fl": 0xFB02,
+ "ffi": 0xFB03,
+ "ffl": 0xFB04,
+ "afii57705": 0xFB1F,
+ "doubleyodpatah": 0xFB1F,
+ "doubleyodpatahhebrew": 0xFB1F,
+ "yodyodpatahhebrew": 0xFB1F,
+ "ayinaltonehebrew": 0xFB20,
+ "afii57694": 0xFB2A,
+ "shinshindot": 0xFB2A,
+ "shinshindothebrew": 0xFB2A,
+ "afii57695": 0xFB2B,
+ "shinsindot": 0xFB2B,
+ "shinsindothebrew": 0xFB2B,
+ "shindageshshindot": 0xFB2C,
+ "shindageshshindothebrew": 0xFB2C,
+ "shindageshsindot": 0xFB2D,
+ "shindageshsindothebrew": 0xFB2D,
+ "alefpatahhebrew": 0xFB2E,
+ "alefqamatshebrew": 0xFB2F,
+ "alefdageshhebrew": 0xFB30,
+ "betdagesh": 0xFB31,
+ "betdageshhebrew": 0xFB31,
+ "gimeldagesh": 0xFB32,
+ "gimeldageshhebrew": 0xFB32,
+ "daletdagesh": 0xFB33,
+ "daletdageshhebrew": 0xFB33,
+ "hedagesh": 0xFB34,
+ "hedageshhebrew": 0xFB34,
+ "afii57723": 0xFB35,
+ "vavdagesh": 0xFB35,
+ "vavdagesh65": 0xFB35,
+ "vavdageshhebrew": 0xFB35,
+ "zayindagesh": 0xFB36,
+ "zayindageshhebrew": 0xFB36,
+ "tetdagesh": 0xFB38,
+ "tetdageshhebrew": 0xFB38,
+ "yoddagesh": 0xFB39,
+ "yoddageshhebrew": 0xFB39,
+ "finalkafdagesh": 0xFB3A,
+ "finalkafdageshhebrew": 0xFB3A,
+ "kafdagesh": 0xFB3B,
+ "kafdageshhebrew": 0xFB3B,
+ "lameddagesh": 0xFB3C,
+ "lameddageshhebrew": 0xFB3C,
+ "memdagesh": 0xFB3E,
+ "memdageshhebrew": 0xFB3E,
+ "nundagesh": 0xFB40,
+ "nundageshhebrew": 0xFB40,
+ "samekhdagesh": 0xFB41,
+ "samekhdageshhebrew": 0xFB41,
+ "pefinaldageshhebrew": 0xFB43,
+ "pedagesh": 0xFB44,
+ "pedageshhebrew": 0xFB44,
+ "tsadidagesh": 0xFB46,
+ "tsadidageshhebrew": 0xFB46,
+ "qofdagesh": 0xFB47,
+ "qofdageshhebrew": 0xFB47,
+ "reshdageshhebrew": 0xFB48,
+ "shindagesh": 0xFB49,
+ "shindageshhebrew": 0xFB49,
+ "tavdages": 0xFB4A,
+ "tavdagesh": 0xFB4A,
+ "tavdageshhebrew": 0xFB4A,
+ "afii57700": 0xFB4B,
+ "vavholam": 0xFB4B,
+ "vavholamhebrew": 0xFB4B,
+ "betrafehebrew": 0xFB4C,
+ "kafrafehebrew": 0xFB4D,
+ "perafehebrew": 0xFB4E,
+ "aleflamedhebrew": 0xFB4F,
+ "pehfinalarabic": 0xFB57,
+ "pehinitialarabic": 0xFB58,
+ "pehmedialarabic": 0xFB59,
+ "ttehfinalarabic": 0xFB67,
+ "ttehinitialarabic": 0xFB68,
+ "ttehmedialarabic": 0xFB69,
+ "vehfinalarabic": 0xFB6B,
+ "vehinitialarabic": 0xFB6C,
+ "vehmedialarabic": 0xFB6D,
+ "tchehfinalarabic": 0xFB7B,
+ "tchehinitialarabic": 0xFB7C,
+ "tchehmeeminitialarabic": 0xFB7C,
+ "tchehmedialarabic": 0xFB7D,
+ "ddalfinalarabic": 0xFB89,
+ "jehfinalarabic": 0xFB8B,
+ "rrehfinalarabic": 0xFB8D,
+ "gaffinalarabic": 0xFB93,
+ "gafinitialarabic": 0xFB94,
+ "gafmedialarabic": 0xFB95,
+ "noonghunnafinalarabic": 0xFB9F,
+ "hehhamzaaboveisolatedarabic": 0xFBA4,
+ "hehhamzaabovefinalarabic": 0xFBA5,
+ "hehfinalaltonearabic": 0xFBA7,
+ "hehinitialaltonearabic": 0xFBA8,
+ "hehmedialaltonearabic": 0xFBA9,
+ "yehbarreefinalarabic": 0xFBAF,
+ "behmeemisolatedarabic": 0xFC08,
+ "tehjeemisolatedarabic": 0xFC0B,
+ "tehhahisolatedarabic": 0xFC0C,
+ "tehmeemisolatedarabic": 0xFC0E,
+ "meemmeemisolatedarabic": 0xFC48,
+ "noonjeemisolatedarabic": 0xFC4B,
+ "noonmeemisolatedarabic": 0xFC4E,
+ "yehmeemisolatedarabic": 0xFC58,
+ "shaddadammatanarabic": 0xFC5E,
+ "shaddakasratanarabic": 0xFC5F,
+ "shaddafathaarabic": 0xFC60,
+ "shaddadammaarabic": 0xFC61,
+ "shaddakasraarabic": 0xFC62,
+ "behnoonfinalarabic": 0xFC6D,
+ "tehnoonfinalarabic": 0xFC73,
+ "noonnoonfinalarabic": 0xFC8D,
+ "yehnoonfinalarabic": 0xFC94,
+ "behmeeminitialarabic": 0xFC9F,
+ "tehjeeminitialarabic": 0xFCA1,
+ "tehhahinitialarabic": 0xFCA2,
+ "tehmeeminitialarabic": 0xFCA4,
+ "lamjeeminitialarabic": 0xFCC9,
+ "lamhahinitialarabic": 0xFCCA,
+ "lamkhahinitialarabic": 0xFCCB,
+ "lammeeminitialarabic": 0xFCCC,
+ "meemmeeminitialarabic": 0xFCD1,
+ "noonjeeminitialarabic": 0xFCD2,
+ "noonmeeminitialarabic": 0xFCD5,
+ "yehmeeminitialarabic": 0xFCDD,
+ "parenleftaltonearabic": 0xFD3E,
+ "parenrightaltonearabic": 0xFD3F,
+ "lammeemhahinitialarabic": 0xFD88,
+ "lamlamhehisolatedarabic": 0xFDF2,
+ "sallallahoualayhewasallamarabic": 0xFDFA,
+ "twodotleadervertical": 0xFE30,
+ "emdashvertical": 0xFE31,
+ "endashvertical": 0xFE32,
+ "underscorevertical": 0xFE33,
+ "wavyunderscorevertical": 0xFE34,
+ "parenleftvertical": 0xFE35,
+ "parenrightvertical": 0xFE36,
+ "braceleftvertical": 0xFE37,
+ "bracerightvertical": 0xFE38,
+ "tortoiseshellbracketleftvertical": 0xFE39,
+ "tortoiseshellbracketrightvertical": 0xFE3A,
+ "blacklenticularbracketleftvertical": 0xFE3B,
+ "blacklenticularbracketrightvertical": 0xFE3C,
+ "dblanglebracketleftvertical": 0xFE3D,
+ "dblanglebracketrightvertical": 0xFE3E,
+ "anglebracketleftvertical": 0xFE3F,
+ "anglebracketrightvertical": 0xFE40,
+ "cornerbracketleftvertical": 0xFE41,
+ "cornerbracketrightvertical": 0xFE42,
+ "whitecornerbracketleftvertical": 0xFE43,
+ "whitecornerbracketrightvertical": 0xFE44,
+ "overlinedashed": 0xFE49,
+ "overlinecenterline": 0xFE4A,
+ "overlinewavy": 0xFE4B,
+ "overlinedblwavy": 0xFE4C,
+ "lowlinedashed": 0xFE4D,
+ "lowlinecenterline": 0xFE4E,
+ "underscorewavy": 0xFE4F,
+ "commasmall": 0xFE50,
+ "periodsmall": 0xFE52,
+ "semicolonsmall": 0xFE54,
+ "colonsmall": 0xFE55,
+ "parenleftsmall": 0xFE59,
+ "parenrightsmall": 0xFE5A,
+ "braceleftsmall": 0xFE5B,
+ "bracerightsmall": 0xFE5C,
+ "tortoiseshellbracketleftsmall": 0xFE5D,
+ "tortoiseshellbracketrightsmall": 0xFE5E,
+ "numbersignsmall": 0xFE5F,
+ "asterisksmall": 0xFE61,
+ "plussmall": 0xFE62,
+ "hyphensmall": 0xFE63,
+ "lesssmall": 0xFE64,
+ "greatersmall": 0xFE65,
+ "equalsmall": 0xFE66,
+ "dollarsmall": 0xFE69,
+ "percentsmall": 0xFE6A,
+ "atsmall": 0xFE6B,
+ "alefmaddaabovefinalarabic": 0xFE82,
+ "alefhamzaabovefinalarabic": 0xFE84,
+ "wawhamzaabovefinalarabic": 0xFE86,
+ "alefhamzabelowfinalarabic": 0xFE88,
+ "yehhamzaabovefinalarabic": 0xFE8A,
+ "yehhamzaaboveinitialarabic": 0xFE8B,
+ "yehhamzaabovemedialarabic": 0xFE8C,
+ "aleffinalarabic": 0xFE8E,
+ "behfinalarabic": 0xFE90,
+ "behinitialarabic": 0xFE91,
+ "behmedialarabic": 0xFE92,
+ "tehmarbutafinalarabic": 0xFE94,
+ "tehfinalarabic": 0xFE96,
+ "tehinitialarabic": 0xFE97,
+ "tehmedialarabic": 0xFE98,
+ "thehfinalarabic": 0xFE9A,
+ "thehinitialarabic": 0xFE9B,
+ "thehmedialarabic": 0xFE9C,
+ "jeemfinalarabic": 0xFE9E,
+ "jeeminitialarabic": 0xFE9F,
+ "jeemmedialarabic": 0xFEA0,
+ "hahfinalarabic": 0xFEA2,
+ "hahinitialarabic": 0xFEA3,
+ "hahmedialarabic": 0xFEA4,
+ "khahfinalarabic": 0xFEA6,
+ "khahinitialarabic": 0xFEA7,
+ "khahmedialarabic": 0xFEA8,
+ "dalfinalarabic": 0xFEAA,
+ "thalfinalarabic": 0xFEAC,
+ "rehfinalarabic": 0xFEAE,
+ "zainfinalarabic": 0xFEB0,
+ "seenfinalarabic": 0xFEB2,
+ "seeninitialarabic": 0xFEB3,
+ "seenmedialarabic": 0xFEB4,
+ "sheenfinalarabic": 0xFEB6,
+ "sheeninitialarabic": 0xFEB7,
+ "sheenmedialarabic": 0xFEB8,
+ "sadfinalarabic": 0xFEBA,
+ "sadinitialarabic": 0xFEBB,
+ "sadmedialarabic": 0xFEBC,
+ "dadfinalarabic": 0xFEBE,
+ "dadinitialarabic": 0xFEBF,
+ "dadmedialarabic": 0xFEC0,
+ "tahfinalarabic": 0xFEC2,
+ "tahinitialarabic": 0xFEC3,
+ "tahmedialarabic": 0xFEC4,
+ "zahfinalarabic": 0xFEC6,
+ "zahinitialarabic": 0xFEC7,
+ "zahmedialarabic": 0xFEC8,
+ "ainfinalarabic": 0xFECA,
+ "aininitialarabic": 0xFECB,
+ "ainmedialarabic": 0xFECC,
+ "ghainfinalarabic": 0xFECE,
+ "ghaininitialarabic": 0xFECF,
+ "ghainmedialarabic": 0xFED0,
+ "fehfinalarabic": 0xFED2,
+ "fehinitialarabic": 0xFED3,
+ "fehmedialarabic": 0xFED4,
+ "qaffinalarabic": 0xFED6,
+ "qafinitialarabic": 0xFED7,
+ "qafmedialarabic": 0xFED8,
+ "kaffinalarabic": 0xFEDA,
+ "kafinitialarabic": 0xFEDB,
+ "kafmedialarabic": 0xFEDC,
+ "lamfinalarabic": 0xFEDE,
+ "laminitialarabic": 0xFEDF,
+ "lammeemjeeminitialarabic": 0xFEDF,
+ "lammeemkhahinitialarabic": 0xFEDF,
+ "lammedialarabic": 0xFEE0,
+ "meemfinalarabic": 0xFEE2,
+ "meeminitialarabic": 0xFEE3,
+ "meemmedialarabic": 0xFEE4,
+ "noonfinalarabic": 0xFEE6,
+ "nooninitialarabic": 0xFEE7,
+ "noonhehinitialarabic": 0xFEE7,
+ "noonmedialarabic": 0xFEE8,
+ "hehfinalalttwoarabic": 0xFEEA,
+ "hehfinalarabic": 0xFEEA,
+ "hehinitialarabic": 0xFEEB,
+ "hehmedialarabic": 0xFEEC,
+ "wawfinalarabic": 0xFEEE,
+ "alefmaksurafinalarabic": 0xFEF0,
+ "yehfinalarabic": 0xFEF2,
+ "alefmaksurainitialarabic": 0xFEF3,
+ "yehinitialarabic": 0xFEF3,
+ "alefmaksuramedialarabic": 0xFEF4,
+ "yehmedialarabic": 0xFEF4,
+ "lamalefmaddaaboveisolatedarabic": 0xFEF5,
+ "lamalefmaddaabovefinalarabic": 0xFEF6,
+ "lamalefhamzaaboveisolatedarabic": 0xFEF7,
+ "lamalefhamzaabovefinalarabic": 0xFEF8,
+ "lamalefhamzabelowisolatedarabic": 0xFEF9,
+ "lamalefhamzabelowfinalarabic": 0xFEFA,
+ "lamalefisolatedarabic": 0xFEFB,
+ "lamaleffinalarabic": 0xFEFC,
+ "zerowidthjoiner": 0xFEFF,
+ "exclammonospace": 0xFF01,
+ "quotedblmonospace": 0xFF02,
+ "numbersignmonospace": 0xFF03,
+ "dollarmonospace": 0xFF04,
+ "percentmonospace": 0xFF05,
+ "ampersandmonospace": 0xFF06,
+ "quotesinglemonospace": 0xFF07,
+ "parenleftmonospace": 0xFF08,
+ "parenrightmonospace": 0xFF09,
+ "asteriskmonospace": 0xFF0A,
+ "plusmonospace": 0xFF0B,
+ "commamonospace": 0xFF0C,
+ "hyphenmonospace": 0xFF0D,
+ "periodmonospace": 0xFF0E,
+ "slashmonospace": 0xFF0F,
+ "zeromonospace": 0xFF10,
+ "onemonospace": 0xFF11,
+ "twomonospace": 0xFF12,
+ "threemonospace": 0xFF13,
+ "fourmonospace": 0xFF14,
+ "fivemonospace": 0xFF15,
+ "sixmonospace": 0xFF16,
+ "sevenmonospace": 0xFF17,
+ "eightmonospace": 0xFF18,
+ "ninemonospace": 0xFF19,
+ "colonmonospace": 0xFF1A,
+ "semicolonmonospace": 0xFF1B,
+ "lessmonospace": 0xFF1C,
+ "equalmonospace": 0xFF1D,
+ "greatermonospace": 0xFF1E,
+ "questionmonospace": 0xFF1F,
+ "atmonospace": 0xFF20,
+ "Amonospace": 0xFF21,
+ "Bmonospace": 0xFF22,
+ "Cmonospace": 0xFF23,
+ "Dmonospace": 0xFF24,
+ "Emonospace": 0xFF25,
+ "Fmonospace": 0xFF26,
+ "Gmonospace": 0xFF27,
+ "Hmonospace": 0xFF28,
+ "Imonospace": 0xFF29,
+ "Jmonospace": 0xFF2A,
+ "Kmonospace": 0xFF2B,
+ "Lmonospace": 0xFF2C,
+ "Mmonospace": 0xFF2D,
+ "Nmonospace": 0xFF2E,
+ "Omonospace": 0xFF2F,
+ "Pmonospace": 0xFF30,
+ "Qmonospace": 0xFF31,
+ "Rmonospace": 0xFF32,
+ "Smonospace": 0xFF33,
+ "Tmonospace": 0xFF34,
+ "Umonospace": 0xFF35,
+ "Vmonospace": 0xFF36,
+ "Wmonospace": 0xFF37,
+ "Xmonospace": 0xFF38,
+ "Ymonospace": 0xFF39,
+ "Zmonospace": 0xFF3A,
+ "bracketleftmonospace": 0xFF3B,
+ "backslashmonospace": 0xFF3C,
+ "bracketrightmonospace": 0xFF3D,
+ "asciicircummonospace": 0xFF3E,
+ "underscoremonospace": 0xFF3F,
+ "gravemonospace": 0xFF40,
+ "amonospace": 0xFF41,
+ "bmonospace": 0xFF42,
+ "cmonospace": 0xFF43,
+ "dmonospace": 0xFF44,
+ "emonospace": 0xFF45,
+ "fmonospace": 0xFF46,
+ "gmonospace": 0xFF47,
+ "hmonospace": 0xFF48,
+ "imonospace": 0xFF49,
+ "jmonospace": 0xFF4A,
+ "kmonospace": 0xFF4B,
+ "lmonospace": 0xFF4C,
+ "mmonospace": 0xFF4D,
+ "nmonospace": 0xFF4E,
+ "omonospace": 0xFF4F,
+ "pmonospace": 0xFF50,
+ "qmonospace": 0xFF51,
+ "rmonospace": 0xFF52,
+ "smonospace": 0xFF53,
+ "tmonospace": 0xFF54,
+ "umonospace": 0xFF55,
+ "vmonospace": 0xFF56,
+ "wmonospace": 0xFF57,
+ "xmonospace": 0xFF58,
+ "ymonospace": 0xFF59,
+ "zmonospace": 0xFF5A,
+ "braceleftmonospace": 0xFF5B,
+ "barmonospace": 0xFF5C,
+ "bracerightmonospace": 0xFF5D,
+ "asciitildemonospace": 0xFF5E,
+ "periodhalfwidth": 0xFF61,
+ "cornerbracketlefthalfwidth": 0xFF62,
+ "cornerbracketrighthalfwidth": 0xFF63,
+ "ideographiccommaleft": 0xFF64,
+ "middledotkatakanahalfwidth": 0xFF65,
+ "wokatakanahalfwidth": 0xFF66,
+ "asmallkatakanahalfwidth": 0xFF67,
+ "ismallkatakanahalfwidth": 0xFF68,
+ "usmallkatakanahalfwidth": 0xFF69,
+ "esmallkatakanahalfwidth": 0xFF6A,
+ "osmallkatakanahalfwidth": 0xFF6B,
+ "yasmallkatakanahalfwidth": 0xFF6C,
+ "yusmallkatakanahalfwidth": 0xFF6D,
+ "yosmallkatakanahalfwidth": 0xFF6E,
+ "tusmallkatakanahalfwidth": 0xFF6F,
+ "katahiraprolongmarkhalfwidth": 0xFF70,
+ "akatakanahalfwidth": 0xFF71,
+ "ikatakanahalfwidth": 0xFF72,
+ "ukatakanahalfwidth": 0xFF73,
+ "ekatakanahalfwidth": 0xFF74,
+ "okatakanahalfwidth": 0xFF75,
+ "kakatakanahalfwidth": 0xFF76,
+ "kikatakanahalfwidth": 0xFF77,
+ "kukatakanahalfwidth": 0xFF78,
+ "kekatakanahalfwidth": 0xFF79,
+ "kokatakanahalfwidth": 0xFF7A,
+ "sakatakanahalfwidth": 0xFF7B,
+ "sikatakanahalfwidth": 0xFF7C,
+ "sukatakanahalfwidth": 0xFF7D,
+ "sekatakanahalfwidth": 0xFF7E,
+ "sokatakanahalfwidth": 0xFF7F,
+ "takatakanahalfwidth": 0xFF80,
+ "tikatakanahalfwidth": 0xFF81,
+ "tukatakanahalfwidth": 0xFF82,
+ "tekatakanahalfwidth": 0xFF83,
+ "tokatakanahalfwidth": 0xFF84,
+ "nakatakanahalfwidth": 0xFF85,
+ "nikatakanahalfwidth": 0xFF86,
+ "nukatakanahalfwidth": 0xFF87,
+ "nekatakanahalfwidth": 0xFF88,
+ "nokatakanahalfwidth": 0xFF89,
+ "hakatakanahalfwidth": 0xFF8A,
+ "hikatakanahalfwidth": 0xFF8B,
+ "hukatakanahalfwidth": 0xFF8C,
+ "hekatakanahalfwidth": 0xFF8D,
+ "hokatakanahalfwidth": 0xFF8E,
+ "makatakanahalfwidth": 0xFF8F,
+ "mikatakanahalfwidth": 0xFF90,
+ "mukatakanahalfwidth": 0xFF91,
+ "mekatakanahalfwidth": 0xFF92,
+ "mokatakanahalfwidth": 0xFF93,
+ "yakatakanahalfwidth": 0xFF94,
+ "yukatakanahalfwidth": 0xFF95,
+ "yokatakanahalfwidth": 0xFF96,
+ "rakatakanahalfwidth": 0xFF97,
+ "rikatakanahalfwidth": 0xFF98,
+ "rukatakanahalfwidth": 0xFF99,
+ "rekatakanahalfwidth": 0xFF9A,
+ "rokatakanahalfwidth": 0xFF9B,
+ "wakatakanahalfwidth": 0xFF9C,
+ "nkatakanahalfwidth": 0xFF9D,
+ "voicedmarkkanahalfwidth": 0xFF9E,
+ "semivoicedmarkkanahalfwidth": 0xFF9F,
+ "centmonospace": 0xFFE0,
+ "sterlingmonospace": 0xFFE1,
+ "macronmonospace": 0xFFE3,
+ "yenmonospace": 0xFFE5,
+ "wonmonospace": 0xFFE6,
+}
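The table above maps Adobe glyph names to runes (mostly presentation forms and corporate-use code points). The dictEncoder in page.go below performs the same kind of lookup, via nameToRune, when a font's /Encoding carries a /Differences array. A minimal, self-contained sketch of that lookup; the helper name and the tiny excerpt of the table are illustrative only:

package main

import "fmt"

// runeForGlyphName resolves an Adobe glyph name against a name-to-rune table
// such as the one above, reporting whether the name maps to a usable rune.
func runeForGlyphName(table map[string]rune, name string) (rune, bool) {
	r, ok := table[name]
	return r, ok && r != 0
}

func main() {
	// Tiny excerpt of the table above, for illustration only.
	table := map[string]rune{"fi": 0xFB01, "Asmall": 0xF761}
	if r, ok := runeForGlyphName(table, "fi"); ok {
		fmt.Printf("fi -> U+%04X\n", r) // prints: fi -> U+FB01
	}
}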
diff --git a/godo/office/pdf/page.go b/godo/office/pdf/page.go
new file mode 100644
index 0000000..7389aba
--- /dev/null
+++ b/godo/office/pdf/page.go
@@ -0,0 +1,1047 @@
+package pdf
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+)
+
+// A Page represents a single page in a PDF file.
+// The methods interpret a Page dictionary stored in V.
+type Page struct {
+ V Value
+}
+
+// Page returns the page for the given page number.
+// Page numbers are indexed starting at 1, not 0.
+// If the page is not found, Page returns a Page with p.V.IsNull().
+func (r *Reader) Page(num int) Page {
+ num-- // now 0-indexed
+ page := r.Trailer().Key("Root").Key("Pages")
+Search:
+ for page.Key("Type").Name() == "Pages" {
+ count := int(page.Key("Count").Int64())
+ if count < num {
+ return Page{}
+ }
+ kids := page.Key("Kids")
+ for i := 0; i < kids.Len(); i++ {
+ kid := kids.Index(i)
+ if kid.Key("Type").Name() == "Pages" {
+ c := int(kid.Key("Count").Int64())
+ if num < c {
+ page = kid
+ continue Search
+ }
+ num -= c
+ continue
+ }
+ if kid.Key("Type").Name() == "Page" {
+ if num == 0 {
+ return Page{kid}
+ }
+ num--
+ }
+ }
+ break
+ }
+ return Page{}
+}
+
+// NumPage returns the number of pages in the PDF file.
+func (r *Reader) NumPage() int {
+ return int(r.Trailer().Key("Root").Key("Pages").Key("Count").Int64())
+}
+
+// GetPlainText returns all the text in the PDF file
+func (r *Reader) GetPlainText() (reader io.Reader, err error) {
+ pages := r.NumPage()
+ var buf bytes.Buffer
+ fonts := make(map[string]*Font)
+ for i := 1; i <= pages; i++ {
+ p := r.Page(i)
+ for _, name := range p.Fonts() { // cache fonts so we don't continually parse charmap
+ if _, ok := fonts[name]; !ok {
+ f := p.Font(name)
+ fonts[name] = &f
+ }
+ }
+ text, _ := p.GetPlainText(fonts)
+ // if err != nil {
+ // return &bytes.Buffer{}, err
+ // }
+ buf.WriteString(text)
+ }
+ return &buf, nil
+}
+
+func (p Page) findInherited(key string) Value {
+ for v := p.V; !v.IsNull(); v = v.Key("Parent") {
+ if r := v.Key(key); !r.IsNull() {
+ return r
+ }
+ }
+ return Value{}
+}
+
+/*
+func (p Page) MediaBox() Value {
+ return p.findInherited("MediaBox")
+}
+
+func (p Page) CropBox() Value {
+ return p.findInherited("CropBox")
+}
+*/
+
+// Resources returns the resources dictionary associated with the page.
+func (p Page) Resources() Value {
+ return p.findInherited("Resources")
+}
+
+// Fonts returns a list of the fonts associated with the page.
+func (p Page) Fonts() []string {
+ return p.Resources().Key("Font").Keys()
+}
+
+// Font returns the font with the given name associated with the page.
+func (p Page) Font(name string) Font {
+ return Font{p.Resources().Key("Font").Key(name), nil}
+}
+
+// A Font represents a font in a PDF file.
+// The methods interpret a Font dictionary stored in V.
+type Font struct {
+ V Value
+ enc TextEncoding
+}
+
+// BaseFont returns the font's name (BaseFont property).
+func (f Font) BaseFont() string {
+ return f.V.Key("BaseFont").Name()
+}
+
+// FirstChar returns the code point of the first character in the font.
+func (f Font) FirstChar() int {
+ return int(f.V.Key("FirstChar").Int64())
+}
+
+// LastChar returns the code point of the last character in the font.
+func (f Font) LastChar() int {
+ return int(f.V.Key("LastChar").Int64())
+}
+
+// Widths returns the widths of the glyphs in the font.
+// In a well-formed PDF, len(f.Widths()) == f.LastChar()+1 - f.FirstChar().
+func (f Font) Widths() []float64 {
+ x := f.V.Key("Widths")
+ var out []float64
+ for i := 0; i < x.Len(); i++ {
+ out = append(out, x.Index(i).Float64())
+ }
+ return out
+}
+
+// Width returns the width of the given code point.
+func (f Font) Width(code int) float64 {
+ first := f.FirstChar()
+ last := f.LastChar()
+ if code < first || last < code {
+ return 0
+ }
+ return f.V.Key("Widths").Index(code - first).Float64()
+}
+
+// Encoder returns the encoding between font code point sequences and UTF-8.
+func (f Font) Encoder() TextEncoding {
+ if f.enc == nil { // caching the Encoder so we don't have to continually parse charmap
+ f.enc = f.getEncoder()
+ }
+ return f.enc
+}
+
+func (f Font) getEncoder() TextEncoding {
+ enc := f.V.Key("Encoding")
+ switch enc.Kind() {
+ case Name:
+ switch enc.Name() {
+ case "WinAnsiEncoding":
+ return &byteEncoder{&winAnsiEncoding}
+ case "MacRomanEncoding":
+ return &byteEncoder{&macRomanEncoding}
+ case "Identity-H":
+ return f.charmapEncoding()
+ default:
+ if DebugOn {
+ println("unknown encoding", enc.Name())
+ }
+ return &nopEncoder{}
+ }
+ case Dict:
+ return &dictEncoder{enc.Key("Differences")}
+ case Null:
+ return f.charmapEncoding()
+ default:
+ if DebugOn {
+ println("unexpected encoding", enc.String())
+ }
+ return &nopEncoder{}
+ }
+}
+
+func (f *Font) charmapEncoding() TextEncoding {
+ toUnicode := f.V.Key("ToUnicode")
+ if toUnicode.Kind() == Stream {
+ m := readCmap(toUnicode)
+ if m == nil {
+ return &nopEncoder{}
+ }
+ return m
+ }
+
+ return &byteEncoder{&pdfDocEncoding}
+}
+
+type dictEncoder struct {
+ v Value
+}
+
+func (e *dictEncoder) Decode(raw string) (text string) {
+ r := make([]rune, 0, len(raw))
+ for i := 0; i < len(raw); i++ {
+ ch := rune(raw[i])
+ n := -1
+ for j := 0; j < e.v.Len(); j++ {
+ x := e.v.Index(j)
+ if x.Kind() == Integer {
+ n = int(x.Int64())
+ continue
+ }
+ if x.Kind() == Name {
+ if int(raw[i]) == n {
+ r := nameToRune[x.Name()]
+ if r != 0 {
+ ch = r
+ break
+ }
+ }
+ n++
+ }
+ }
+ r = append(r, ch)
+ }
+ return string(r)
+}
+
+// A TextEncoding represents a mapping between
+// font code points and UTF-8 text.
+type TextEncoding interface {
+ // Decode returns the UTF-8 text corresponding to
+ // the sequence of code points in raw.
+ Decode(raw string) (text string)
+}
+
+type nopEncoder struct {
+}
+
+func (e *nopEncoder) Decode(raw string) (text string) {
+ return raw
+}
+
+type byteEncoder struct {
+ table *[256]rune
+}
+
+func (e *byteEncoder) Decode(raw string) (text string) {
+ r := make([]rune, 0, len(raw))
+ for i := 0; i < len(raw); i++ {
+ r = append(r, e.table[raw[i]])
+ }
+ return string(r)
+}
+
+type byteRange struct {
+ low string
+ high string
+}
+
+type bfchar struct {
+ orig string
+ repl string
+}
+
+type bfrange struct {
+ lo string
+ hi string
+ dst Value
+}
+
+type cmap struct {
+ space [4][]byteRange // codespace range
+ bfrange []bfrange
+ bfchar []bfchar
+}
+
+func (m *cmap) Decode(raw string) (text string) {
+ var r []rune
+Parse:
+ for len(raw) > 0 {
+ for n := 1; n <= 4 && n <= len(raw); n++ { // number of bytes in the character code (1-4 possible)
+ for _, space := range m.space[n-1] { // find matching codespace Ranges for number of digits
+ if space.low <= raw[:n] && raw[:n] <= space.high { // see if value is in range
+ text := raw[:n]
+ raw = raw[n:]
+ for _, bfchar := range m.bfchar { // check for matching bfchar
+ if len(bfchar.orig) == n && bfchar.orig == text {
+ r = append(r, []rune(utf16Decode(bfchar.repl))...)
+ continue Parse
+ }
+ }
+ for _, bfrange := range m.bfrange { // check for matching bfrange
+ if len(bfrange.lo) == n && bfrange.lo <= text && text <= bfrange.hi {
+ if bfrange.dst.Kind() == String {
+ s := bfrange.dst.RawString()
+ if bfrange.lo != text { // value isn't at the beginning of the range so scale result
+ b := []byte(s)
+ b[len(b)-1] += text[len(text)-1] - bfrange.lo[len(bfrange.lo)-1] // increment last byte by difference
+ s = string(b)
+ }
+ r = append(r, []rune(utf16Decode(s))...)
+ continue Parse
+ }
+ if bfrange.dst.Kind() == Array {
+ n := text[len(text)-1] - bfrange.lo[len(bfrange.lo)-1]
+ v := bfrange.dst.Index(int(n))
+ if v.Kind() == String {
+ s := v.RawString()
+ r = append(r, []rune(utf16Decode(s))...)
+ continue Parse
+ }
+ if DebugOn {
+ fmt.Printf("array %v\n", bfrange.dst)
+ }
+ } else {
+ if DebugOn {
+ fmt.Printf("unknown dst %v\n", bfrange.dst)
+ }
+ }
+ r = append(r, noRune)
+ continue Parse
+ }
+ }
+ r = append(r, noRune)
+ continue Parse
+ }
+ }
+ }
+ if DebugOn {
+ println("no code space found")
+ }
+ r = append(r, noRune)
+ raw = raw[1:]
+ }
+ return string(r)
+}
+
+func readCmap(toUnicode Value) *cmap {
+ n := -1
+ var m cmap
+ ok := true
+ Interpret(toUnicode, func(stk *Stack, op string) {
+ if !ok {
+ return
+ }
+ switch op {
+ case "findresource":
+ stk.Pop() // category
+ stk.Pop() // key
+ stk.Push(newDict())
+ case "begincmap":
+ stk.Push(newDict())
+ case "endcmap":
+ stk.Pop()
+ case "begincodespacerange":
+ n = int(stk.Pop().Int64())
+ case "endcodespacerange":
+ if n < 0 {
+ if DebugOn {
+ println("missing begincodespacerange")
+ }
+ ok = false
+ return
+ }
+ for i := 0; i < n; i++ {
+ hi, lo := stk.Pop().RawString(), stk.Pop().RawString()
+ if len(lo) == 0 || len(lo) != len(hi) {
+ if DebugOn {
+ println("bad codespace range")
+ }
+ ok = false
+ return
+ }
+ m.space[len(lo)-1] = append(m.space[len(lo)-1], byteRange{lo, hi})
+ }
+ n = -1
+ case "beginbfchar":
+ n = int(stk.Pop().Int64())
+ case "endbfchar":
+ if n < 0 {
+ panic("missing beginbfchar")
+ }
+ for i := 0; i < n; i++ {
+ repl, orig := stk.Pop().RawString(), stk.Pop().RawString()
+ m.bfchar = append(m.bfchar, bfchar{orig, repl})
+ }
+ case "beginbfrange":
+ n = int(stk.Pop().Int64())
+ case "endbfrange":
+ if n < 0 {
+ panic("missing beginbfrange")
+ }
+ for i := 0; i < n; i++ {
+ dst, srcHi, srcLo := stk.Pop(), stk.Pop().RawString(), stk.Pop().RawString()
+ m.bfrange = append(m.bfrange, bfrange{srcLo, srcHi, dst})
+ }
+ case "defineresource":
+ stk.Pop().Name() // category
+ value := stk.Pop()
+ stk.Pop().Name() // key
+ stk.Push(value)
+ default:
+ if DebugOn {
+ println("interp\t", op)
+ }
+ }
+ })
+ if !ok {
+ return nil
+ }
+ return &m
+}
+
+type matrix [3][3]float64
+
+var ident = matrix{{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}
+
+func (x matrix) mul(y matrix) matrix {
+ var z matrix
+ for i := 0; i < 3; i++ {
+ for j := 0; j < 3; j++ {
+ for k := 0; k < 3; k++ {
+ z[i][j] += x[i][k] * y[k][j]
+ }
+ }
+ }
+ return z
+}
+
+// A Text represents a single piece of text drawn on a page.
+type Text struct {
+ Font string // the font used
+ FontSize float64 // the font size, in points (1/72 of an inch)
+ X float64 // the X coordinate, in points, increasing left to right
+ Y float64 // the Y coordinate, in points, increasing bottom to top
+ W float64 // the width of the text, in points
+ S string // the actual UTF-8 text
+}
+
+// A Rect represents a rectangle.
+type Rect struct {
+ Min, Max Point
+}
+
+// A Point represents an X, Y pair.
+type Point struct {
+ X float64
+ Y float64
+}
+
+// Content describes the basic content on a page: the text and any drawn rectangles.
+type Content struct {
+ Text []Text
+ Rect []Rect
+}
+
+type gstate struct {
+ Tc float64
+ Tw float64
+ Th float64
+ Tl float64
+ Tf Font
+ Tfs float64
+ Tmode int
+ Trise float64
+ Tm matrix
+ Tlm matrix
+ Trm matrix
+ CTM matrix
+}
+
+// GetPlainText returns all of the page's text without formatting.
+// A font cache can be passed in (to improve parsing performance) or left nil.
+func (p Page) GetPlainText(fonts map[string]*Font) (result string, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ result = ""
+ err = errors.New(fmt.Sprint(r))
+ }
+ }()
+
+ strm := p.V.Key("Contents")
+ var enc TextEncoding = &nopEncoder{}
+
+ if fonts == nil {
+ fonts = make(map[string]*Font)
+ for _, font := range p.Fonts() {
+ f := p.Font(font)
+ fonts[font] = &f
+ }
+ }
+
+ var textBuilder bytes.Buffer
+ showText := func(s string) {
+ for _, ch := range enc.Decode(s) {
+ _, err := textBuilder.WriteRune(ch)
+ if err != nil {
+ panic(err)
+ }
+ }
+ }
+
+ Interpret(strm, func(stk *Stack, op string) {
+ n := stk.Len()
+ args := make([]Value, n)
+ for i := n - 1; i >= 0; i-- {
+ args[i] = stk.Pop()
+ }
+
+ switch op {
+ default:
+ return
+ case "T*": // move to start of next line
+ showText("\n")
+ case "Tf": // set text font and size
+ if len(args) != 2 {
+ panic("bad Tf")
+ }
+ if font, ok := fonts[args[0].Name()]; ok {
+ enc = font.Encoder()
+ } else {
+ enc = &nopEncoder{}
+ }
+ case "\"": // set spacing, move to next line, and show text
+ if len(args) != 3 {
+ panic("bad \" operator")
+ }
+ fallthrough
+ case "'": // move to next line and show text
+ if len(args) != 1 {
+ panic("bad ' operator")
+ }
+ fallthrough
+ case "Tj": // show text
+ if len(args) != 1 {
+ panic("bad Tj operator")
+ }
+ showText(args[0].RawString())
+ case "TJ": // show text, allowing individual glyph positioning
+ v := args[0]
+ for i := 0; i < v.Len(); i++ {
+ x := v.Index(i)
+ if x.Kind() == String {
+ showText(x.RawString())
+ }
+ }
+ }
+ })
+ return textBuilder.String(), nil
+}
+
+// Column represents the contents of a column
+type Column struct {
+ Position int64
+ Content TextVertical
+}
+
+// Columns is a list of columns
+type Columns []*Column
+
+// GetTextByColumn returns all of the page's text grouped by column
+func (p Page) GetTextByColumn() (Columns, error) {
+ result := Columns{}
+ var err error
+
+ defer func() {
+ if r := recover(); r != nil {
+ result = Columns{}
+ err = errors.New(fmt.Sprint(r))
+ }
+ }()
+
+ showText := func(enc TextEncoding, currentX, currentY float64, s string) {
+ var textBuilder bytes.Buffer
+
+ for _, ch := range enc.Decode(s) {
+ _, err := textBuilder.WriteRune(ch)
+ if err != nil {
+ panic(err)
+ }
+ }
+ text := Text{
+ S: textBuilder.String(),
+ X: currentX,
+ Y: currentY,
+ }
+
+ var currentColumn *Column
+ columnFound := false
+ for _, column := range result {
+ if int64(currentX) == column.Position {
+ currentColumn = column
+ columnFound = true
+ break
+ }
+ }
+
+ if !columnFound {
+ currentColumn = &Column{
+ Position: int64(currentX),
+ Content: TextVertical{},
+ }
+ result = append(result, currentColumn)
+ }
+
+ currentColumn.Content = append(currentColumn.Content, text)
+ }
+
+ p.walkTextBlocks(showText)
+
+ for _, column := range result {
+ sort.Sort(column.Content)
+ }
+
+ sort.Slice(result, func(i, j int) bool {
+ return result[i].Position < result[j].Position
+ })
+
+ return result, err
+}
+
+// Row represents the contents of a row
+type Row struct {
+ Position int64
+ Content TextHorizontal
+}
+
+// Rows is a list of rows
+type Rows []*Row
+
+// GetTextByRow returns all of the page's text grouped by row
+func (p Page) GetTextByRow() (Rows, error) {
+ result := Rows{}
+ var err error
+
+ defer func() {
+ if r := recover(); r != nil {
+ result = Rows{}
+ err = errors.New(fmt.Sprint(r))
+ }
+ }()
+
+ showText := func(enc TextEncoding, currentX, currentY float64, s string) {
+ var textBuilder bytes.Buffer
+ for _, ch := range enc.Decode(s) {
+ _, err := textBuilder.WriteRune(ch)
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ // if DebugOn {
+ // fmt.Println(textBuilder.String())
+ // }
+
+ text := Text{
+ S: textBuilder.String(),
+ X: currentX,
+ Y: currentY,
+ }
+
+ var currentRow *Row
+ rowFound := false
+ for _, row := range result {
+ if int64(currentY) == row.Position {
+ currentRow = row
+ rowFound = true
+ break
+ }
+ }
+
+ if !rowFound {
+ currentRow = &Row{
+ Position: int64(currentY),
+ Content: TextHorizontal{},
+ }
+ result = append(result, currentRow)
+ }
+
+ currentRow.Content = append(currentRow.Content, text)
+ }
+
+ p.walkTextBlocks(showText)
+
+ for _, row := range result {
+ sort.Sort(row.Content)
+ }
+
+ sort.Slice(result, func(i, j int) bool {
+ return result[i].Position > result[j].Position
+ })
+
+ return result, err
+}
+
+func (p Page) walkTextBlocks(walker func(enc TextEncoding, x, y float64, s string)) {
+ strm := p.V.Key("Contents")
+
+ fonts := make(map[string]*Font)
+ for _, font := range p.Fonts() {
+ f := p.Font(font)
+ fonts[font] = &f
+ }
+
+ var enc TextEncoding = &nopEncoder{}
+ var currentX, currentY float64
+ Interpret(strm, func(stk *Stack, op string) {
+ n := stk.Len()
+ args := make([]Value, n)
+ for i := n - 1; i >= 0; i-- {
+ args[i] = stk.Pop()
+ }
+
+ // if DebugOn {
+ // fmt.Println(op, "->", args)
+ // }
+
+ switch op {
+ default:
+ return
+ case "T*": // move to start of next line
+ case "Tf": // set text font and size
+ if len(args) != 2 {
+ panic("bad Tf")
+ }
+
+ if font, ok := fonts[args[0].Name()]; ok {
+ enc = font.Encoder()
+ } else {
+ enc = &nopEncoder{}
+ }
+ case "\"": // set spacing, move to next line, and show text
+ if len(args) != 3 {
+ panic("bad \" operator")
+ }
+ fallthrough
+ case "'": // move to next line and show text
+ if len(args) != 1 {
+ panic("bad ' operator")
+ }
+ fallthrough
+ case "Tj": // show text
+ if len(args) != 1 {
+ panic("bad Tj operator")
+ }
+
+ walker(enc, currentX, currentY, args[0].RawString())
+ case "TJ": // show text, allowing individual glyph positioning
+ v := args[0]
+ for i := 0; i < v.Len(); i++ {
+ x := v.Index(i)
+ if x.Kind() == String {
+ walker(enc, currentX, currentY, x.RawString())
+ }
+ }
+ case "Td":
+ walker(enc, currentX, currentY, "")
+ case "Tm":
+ currentX = args[4].Float64()
+ currentY = args[5].Float64()
+ }
+ })
+}
+
+// Content returns the page's content.
+func (p Page) Content() Content {
+ strm := p.V.Key("Contents")
+ var enc TextEncoding = &nopEncoder{}
+
+ var g = gstate{
+ Th: 1,
+ CTM: ident,
+ }
+
+ var text []Text
+ showText := func(s string) {
+ n := 0
+ decoded := enc.Decode(s)
+ for _, ch := range decoded {
+ var w0 float64
+ if n < len(s) {
+ w0 = g.Tf.Width(int(s[n]))
+ }
+ n++
+
+ f := g.Tf.BaseFont()
+ if i := strings.Index(f, "+"); i >= 0 {
+ f = f[i+1:]
+ }
+
+ Trm := matrix{{g.Tfs * g.Th, 0, 0}, {0, g.Tfs, 0}, {0, g.Trise, 1}}.mul(g.Tm).mul(g.CTM)
+ text = append(text, Text{f, Trm[0][0], Trm[2][0], Trm[2][1], w0 / 1000 * Trm[0][0], string(ch)})
+
+ tx := w0/1000*g.Tfs + g.Tc
+ tx *= g.Th
+ g.Tm = matrix{{1, 0, 0}, {0, 1, 0}, {tx, 0, 1}}.mul(g.Tm)
+ }
+ }
+
+ var rect []Rect
+ var gstack []gstate
+ Interpret(strm, func(stk *Stack, op string) {
+ n := stk.Len()
+ args := make([]Value, n)
+ for i := n - 1; i >= 0; i-- {
+ args[i] = stk.Pop()
+ }
+ switch op {
+ default:
+ // if DebugOn {
+ // fmt.Println(op, args)
+ // }
+ return
+
+ case "cm": // update g.CTM
+ if len(args) != 6 {
+ panic("bad g.Tm")
+ }
+ var m matrix
+ for i := 0; i < 6; i++ {
+ m[i/2][i%2] = args[i].Float64()
+ }
+ m[2][2] = 1
+ g.CTM = m.mul(g.CTM)
+
+ case "gs": // set parameters from graphics state resource
+ //gs := p.Resources().Key("ExtGState").Key(args[0].Name())
+ //font := gs.Key("Font")
+ //if font.Kind() == Array && font.Len() == 2 {
+ // if DebugOn {
+ // fmt.Println("FONT", font)
+ // }
+ //}
+
+ case "f": // fill
+ case "g": // setgray
+ case "l": // lineto
+ case "m": // moveto
+
+ case "cs": // set colorspace non-stroking
+ case "scn": // set color non-stroking
+
+ case "re": // append rectangle to path
+ if len(args) != 4 {
+ panic("bad re")
+ }
+ x, y, w, h := args[0].Float64(), args[1].Float64(), args[2].Float64(), args[3].Float64()
+ rect = append(rect, Rect{Point{x, y}, Point{x + w, y + h}})
+
+ case "q": // save graphics state
+ gstack = append(gstack, g)
+
+ case "Q": // restore graphics state
+ n := len(gstack) - 1
+ g = gstack[n]
+ gstack = gstack[:n]
+
+ case "BT": // begin text (reset text matrix and line matrix)
+ g.Tm = ident
+ g.Tlm = g.Tm
+
+ case "ET": // end text
+
+ case "T*": // move to start of next line
+ x := matrix{{1, 0, 0}, {0, 1, 0}, {0, -g.Tl, 1}}
+ g.Tlm = x.mul(g.Tlm)
+ g.Tm = g.Tlm
+
+ case "Tc": // set character spacing
+ if len(args) != 1 {
+ panic("bad g.Tc")
+ }
+ g.Tc = args[0].Float64()
+
+ case "TD": // move text position and set leading
+ if len(args) != 2 {
+ panic("bad Td")
+ }
+ g.Tl = -args[1].Float64()
+ fallthrough
+ case "Td": // move text position
+ if len(args) != 2 {
+ panic("bad Td")
+ }
+ tx := args[0].Float64()
+ ty := args[1].Float64()
+ x := matrix{{1, 0, 0}, {0, 1, 0}, {tx, ty, 1}}
+ g.Tlm = x.mul(g.Tlm)
+ g.Tm = g.Tlm
+
+ case "Tf": // set text font and size
+ if len(args) != 2 {
+ panic("bad Tf")
+ }
+ f := args[0].Name()
+ g.Tf = p.Font(f)
+ enc = g.Tf.Encoder()
+ if enc == nil {
+ if DebugOn {
+ println("no cmap for", f)
+ }
+ enc = &nopEncoder{}
+ }
+ g.Tfs = args[1].Float64()
+
+ case "\"": // set spacing, move to next line, and show text
+ if len(args) != 3 {
+ panic("bad \" operator")
+ }
+ g.Tw = args[0].Float64()
+ g.Tc = args[1].Float64()
+ args = args[2:]
+ fallthrough
+ case "'": // move to next line and show text
+ if len(args) != 1 {
+ panic("bad ' operator")
+ }
+ x := matrix{{1, 0, 0}, {0, 1, 0}, {0, -g.Tl, 1}}
+ g.Tlm = x.mul(g.Tlm)
+ g.Tm = g.Tlm
+ fallthrough
+ case "Tj": // show text
+ if len(args) != 1 {
+ panic("bad Tj operator")
+ }
+ showText(args[0].RawString())
+
+ case "TJ": // show text, allowing individual glyph positioning
+ v := args[0]
+ for i := 0; i < v.Len(); i++ {
+ x := v.Index(i)
+ if x.Kind() == String {
+ showText(x.RawString())
+ } else {
+ tx := -x.Float64() / 1000 * g.Tfs * g.Th
+ g.Tm = matrix{{1, 0, 0}, {0, 1, 0}, {tx, 0, 1}}.mul(g.Tm)
+ }
+ }
+ showText("\n")
+
+ case "TL": // set text leading
+ if len(args) != 1 {
+ panic("bad TL")
+ }
+ g.Tl = args[0].Float64()
+
+ case "Tm": // set text matrix and line matrix
+ if len(args) != 6 {
+ panic("bad g.Tm")
+ }
+ var m matrix
+ for i := 0; i < 6; i++ {
+ m[i/2][i%2] = args[i].Float64()
+ }
+ m[2][2] = 1
+ g.Tm = m
+ g.Tlm = m
+
+ case "Tr": // set text rendering mode
+ if len(args) != 1 {
+ panic("bad Tr")
+ }
+ g.Tmode = int(args[0].Int64())
+
+ case "Ts": // set text rise
+ if len(args) != 1 {
+ panic("bad Ts")
+ }
+ g.Trise = args[0].Float64()
+
+ case "Tw": // set word spacing
+ if len(args) != 1 {
+ panic("bad g.Tw")
+ }
+ g.Tw = args[0].Float64()
+
+ case "Tz": // set horizontal text scaling
+ if len(args) != 1 {
+ panic("bad Tz")
+ }
+ g.Th = args[0].Float64() / 100
+ }
+ })
+ return Content{text, rect}
+}
+
+// TextVertical implements sort.Interface for sorting
+// a slice of Text values in vertical order, top to bottom,
+// and then left to right within a line.
+type TextVertical []Text
+
+func (x TextVertical) Len() int { return len(x) }
+func (x TextVertical) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x TextVertical) Less(i, j int) bool {
+ if x[i].Y != x[j].Y {
+ return x[i].Y > x[j].Y
+ }
+ return x[i].X < x[j].X
+}
+
+// TextHorizontal implements sort.Interface for sorting
+// a slice of Text values in horizontal order, left to right,
+// and then top to bottom within a column.
+type TextHorizontal []Text
+
+func (x TextHorizontal) Len() int { return len(x) }
+func (x TextHorizontal) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x TextHorizontal) Less(i, j int) bool {
+ if x[i].X != x[j].X {
+ return x[i].X < x[j].X
+ }
+ return x[i].Y > x[j].Y
+}
+
+// An Outline is a tree describing the outline (also known as the table of contents)
+// of a document.
+type Outline struct {
+ Title string // title for this element
+ Child []Outline // child elements
+}
+
+// Outline returns the document outline.
+// The Outline returned is the root of the outline tree and typically has no Title itself.
+// That is, the children of the returned root are the top-level entries in the outline.
+func (r *Reader) Outline() Outline {
+ return buildOutline(r.Trailer().Key("Root").Key("Outlines"))
+}
+
+func buildOutline(entry Value) Outline {
+ var x Outline
+ x.Title = entry.Key("Title").Text()
+ for child := entry.Key("First"); child.Kind() == Dict; child = child.Key("Next") {
+ x.Child = append(x.Child, buildOutline(child))
+ }
+ return x
+}
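page.go gives a small read-only view over the page tree: Reader.Page walks the Pages/Kids hierarchy using /Count, Font caches a TextEncoding per font, and GetPlainText / GetTextByRow / GetTextByColumn flatten content streams back into text. A minimal usage sketch, assuming the package is imported as "godo/office/pdf"; the input file name is hypothetical:

package main

import (
	"fmt"
	"io"
	"log"

	"godo/office/pdf" // assumed import path for the package added in this diff
)

func main() {
	f, r, err := pdf.Open("example.pdf") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Whole-document extraction; the reader caches parsed fonts across pages.
	all, err := r.GetPlainText()
	if err != nil {
		log.Fatal(err)
	}
	text, _ := io.ReadAll(all)
	fmt.Println(string(text))

	// Per-page, row-grouped extraction keeps rough layout information.
	for i := 1; i <= r.NumPage(); i++ {
		rows, err := r.Page(i).GetTextByRow()
		if err != nil {
			continue // page-level panics are recovered and surfaced as errors
		}
		for _, row := range rows {
			for _, t := range row.Content {
				fmt.Print(t.S)
			}
			fmt.Println()
		}
	}
}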
diff --git a/godo/office/pdf/ps.go b/godo/office/pdf/ps.go
new file mode 100644
index 0000000..6a0279f
--- /dev/null
+++ b/godo/office/pdf/ps.go
@@ -0,0 +1,134 @@
+package pdf
+
+import (
+ "fmt"
+ "io"
+)
+
+// A Stack represents a stack of values.
+type Stack struct {
+ stack []Value
+}
+
+func (stk *Stack) Len() int {
+ return len(stk.stack)
+}
+
+func (stk *Stack) Push(v Value) {
+ stk.stack = append(stk.stack, v)
+}
+
+func (stk *Stack) Pop() Value {
+ n := len(stk.stack)
+ if n == 0 {
+ return Value{}
+ }
+ v := stk.stack[n-1]
+ stk.stack[n-1] = Value{}
+ stk.stack = stk.stack[:n-1]
+ return v
+}
+
+func newDict() Value {
+ return Value{nil, objptr{}, make(dict)}
+}
+
+// Interpret interprets the content in a stream as a basic PostScript program,
+// pushing values onto a stack and then calling the do function to execute
+// operators. The do function may push or pop values from the stack as needed
+// to implement op.
+//
+// Interpret handles the operators "dict", "currentdict", "begin", "end", "def", and "pop" itself.
+//
+// Interpret is not a full-blown PostScript interpreter. Its job is to handle the
+// very limited PostScript found in certain supporting file formats embedded
+// in PDF files, such as cmap files that describe the mapping from font code
+// points to Unicode code points.
+//
+// There is no support for executable blocks, among other limitations.
+func Interpret(strm Value, do func(stk *Stack, op string)) {
+
+ rd := strm.Reader()
+ b := newBuffer(rd, 0)
+ b.allowEOF = true
+ b.allowObjptr = false
+ b.allowStream = false
+ var stk Stack
+ var dicts []dict
+Reading:
+ for {
+ tok := b.readToken()
+ if tok == io.EOF {
+ break
+ }
+ if kw, ok := tok.(keyword); ok {
+ switch kw {
+ case "null", "[", "]", "<<", ">>":
+ break
+ default:
+ for i := len(dicts) - 1; i >= 0; i-- {
+ if v, ok := dicts[i][name(kw)]; ok {
+ stk.Push(Value{nil, objptr{}, v})
+ continue Reading
+ }
+ }
+ do(&stk, string(kw))
+ continue
+ case "dict":
+ stk.Pop()
+ stk.Push(Value{nil, objptr{}, make(dict)})
+ continue
+ case "currentdict":
+ if len(dicts) == 0 {
+ panic("no current dictionary")
+ }
+ stk.Push(Value{nil, objptr{}, dicts[len(dicts)-1]})
+ continue
+ case "begin":
+ d := stk.Pop()
+ if d.Kind() != Dict {
+ panic("cannot begin non-dict")
+ }
+ dicts = append(dicts, d.data.(dict))
+ continue
+ case "end":
+ if len(dicts) <= 0 {
+ panic("mismatched begin/end")
+ }
+ dicts = dicts[:len(dicts)-1]
+ continue
+ case "def":
+ if len(dicts) <= 0 {
+ panic("def without open dict")
+ }
+ val := stk.Pop()
+ key, ok := stk.Pop().data.(name)
+ if !ok {
+ panic("def of non-name")
+ }
+ dicts[len(dicts)-1][key] = val.data
+ continue
+ case "pop":
+ stk.Pop()
+ continue
+ }
+ }
+ b.unreadToken(tok)
+ obj := b.readObject()
+ stk.Push(Value{nil, objptr{}, obj})
+ }
+}
+
+type seqReader struct {
+ rd io.Reader
+ offset int64
+}
+
+func (r *seqReader) ReadAt(buf []byte, offset int64) (int, error) {
+ if offset != r.offset {
+ return 0, fmt.Errorf("non-sequential read of stream")
+ }
+ n, err := io.ReadFull(r.rd, buf)
+ r.offset += int64(n)
+ return n, err
+}
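ps.go's Interpret is the engine the cmap reader and the content-stream walkers above are built on: it tokenizes the stream, keeps operands on a Stack, handles the dictionary operators itself, and hands every other keyword to the callback with its operands still on the stack. A hedged sketch of a callback that simply pops and prints each operator's operands, using the same pop-in-reverse pattern as page.go; the import path, file name, and function name are assumptions:

package main

import (
	"fmt"
	"log"

	"godo/office/pdf" // assumed import path for the package added in this diff
)

// logOperators prints every content-stream operator with its operands,
// popping them in reverse so args end up in their original order.
func logOperators(contents pdf.Value) {
	pdf.Interpret(contents, func(stk *pdf.Stack, op string) {
		n := stk.Len()
		args := make([]pdf.Value, n)
		for i := n - 1; i >= 0; i-- {
			args[i] = stk.Pop()
		}
		fmt.Println(op, args)
	})
}

func main() {
	f, r, err := pdf.Open("example.pdf") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	logOperators(r.Page(1).V.Key("Contents"))
}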
diff --git a/godo/office/pdf/read.go b/godo/office/pdf/read.go
new file mode 100644
index 0000000..1a35257
--- /dev/null
+++ b/godo/office/pdf/read.go
@@ -0,0 +1,1111 @@
+// Package pdf implements reading of PDF files.
+//
+// # Overview
+//
+// PDF is Adobe's Portable Document Format, ubiquitous on the internet.
+// A PDF document is a complex data format built on a fairly simple structure.
+// This package exposes the simple structure along with some wrappers to
+// extract basic information. If more complex information is needed, it is
+// possible to extract that information by interpreting the structure exposed
+// by this package.
+//
+// Specifically, a PDF is a data structure built from Values, each of which has
+// one of the following Kinds:
+//
+// Null, for the null object.
+// Integer, for an integer.
+// Real, for a floating-point number.
+// Bool, for a boolean value.
+// Name, for a name constant (as in /Helvetica).
+// String, for a string constant.
+// Dict, for a dictionary of name-value pairs.
+// Array, for an array of values.
+// Stream, for an opaque data stream and associated header dictionary.
+//
+// The accessors on Value—Int64, Float64, Bool, Name, and so on—return
+// a view of the data as the given type. When there is no appropriate view,
+// the accessor returns a zero result. For example, the Name accessor returns
+// the empty string if called on a Value v for which v.Kind() != Name.
+// Returning zero values this way, especially from the Dict and Array accessors,
+// which themselves return Values, makes it possible to traverse a PDF quickly
+// without writing any error checking. On the other hand, it means that mistakes
+// can go unreported.
+//
+// The basic structure of the PDF file is exposed as the graph of Values.
+//
+// Most richer data structures in a PDF file are dictionaries with specific interpretations
+// of the name-value pairs. The Font and Page wrappers make the interpretation
+// of a specific Value as the corresponding type easier. They are only helpers, though:
+// they are implemented only in terms of the Value API and could be moved outside
+// the package. Equally important, traversal of other PDF data structures can be implemented
+// in other packages as needed.
+package pdf
+
+// BUG(rsc): The package is incomplete, although it has been used successfully on some
+// large real-world PDF files.
+
+// BUG(rsc): There is no support for closing open PDF files. If you drop all references to a Reader,
+// the underlying reader will eventually be garbage collected.
+
+// BUG(rsc): The library makes no attempt at efficiency. A value cache maintained in the Reader
+// would probably help significantly.
+
+// BUG(rsc): The support for reading encrypted files is weak.
+
+// BUG(rsc): The Value API does not support error reporting. The intent is to allow users to
+// set an error reporting callback in Reader, but that code has not been implemented.
+
+import (
+ "bytes"
+ "compress/zlib"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/md5"
+ "crypto/rc4"
+ "encoding/ascii85"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "sort"
+ "strconv"
+)
+
+// DebugOn controls whether debug messages are logged to stdout. Set it to true if problems arise while reading.
+var DebugOn = false
+
+// A Reader is a single PDF file open for reading.
+type Reader struct {
+ f io.ReaderAt
+ end int64
+ xref []xref
+ trailer dict
+ trailerptr objptr
+ key []byte
+ useAES bool
+}
+
+type xref struct {
+ ptr objptr
+ inStream bool
+ stream objptr
+ offset int64
+}
+
+func (r *Reader) errorf(format string, args ...interface{}) {
+ panic(fmt.Errorf(format, args...))
+}
+
+// Open opens a file for reading.
+func Open(file string) (*os.File, *Reader, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ f.Close()
+ return nil, nil, err
+ }
+ fi, err := f.Stat()
+ if err != nil {
+ f.Close()
+ return nil, nil, err
+ }
+ reader, err := NewReader(f, fi.Size())
+ if err != nil {
+ f.Close()
+ return nil, nil, err
+ }
+ return f, reader, err
+}
+
+// NewReader opens a file for reading, using the data in f with the given total size.
+func NewReader(f io.ReaderAt, size int64) (*Reader, error) {
+ return NewReaderEncrypted(f, size, nil)
+}
+
+// NewReaderEncrypted opens a file for reading, using the data in f with the given total size.
+// If the PDF is encrypted, NewReaderEncrypted calls pw repeatedly to obtain passwords
+// to try. If pw returns the empty string, NewReaderEncrypted stops trying to decrypt
+// the file and returns an error.
+func NewReaderEncrypted(f io.ReaderAt, size int64, pw func() string) (*Reader, error) {
+ buf := make([]byte, 10)
+ f.ReadAt(buf, 0)
+ if !bytes.HasPrefix(buf, []byte("%PDF-1.")) || buf[7] < '0' || buf[7] > '7' || buf[8] != '\r' && buf[8] != '\n' {
+ return nil, fmt.Errorf("not a PDF file: invalid header")
+ }
+ end := size
+ const endChunk = 100
+ buf = make([]byte, endChunk)
+ f.ReadAt(buf, end-endChunk)
+ for len(buf) > 0 && buf[len(buf)-1] == '\n' || buf[len(buf)-1] == '\r' {
+ buf = buf[:len(buf)-1]
+ }
+ buf = bytes.TrimRight(buf, "\r\n\t ")
+ if !bytes.HasSuffix(buf, []byte("%%EOF")) {
+ return nil, fmt.Errorf("not a PDF file: missing %%%%EOF")
+ }
+ i := findLastLine(buf, "startxref")
+ if i < 0 {
+ return nil, fmt.Errorf("malformed PDF file: missing final startxref")
+ }
+
+ r := &Reader{
+ f: f,
+ end: end,
+ }
+ pos := end - endChunk + int64(i)
+ b := newBuffer(io.NewSectionReader(f, pos, end-pos), pos)
+ if b.readToken() != keyword("startxref") {
+ return nil, fmt.Errorf("malformed PDF file: missing startxref")
+ }
+ startxref, ok := b.readToken().(int64)
+ if !ok {
+ return nil, fmt.Errorf("malformed PDF file: startxref not followed by integer")
+ }
+ b = newBuffer(io.NewSectionReader(r.f, startxref, r.end-startxref), startxref)
+ xref, trailerptr, trailer, err := readXref(r, b)
+ if err != nil {
+ return nil, err
+ }
+ r.xref = xref
+ r.trailer = trailer
+ r.trailerptr = trailerptr
+ if trailer["Encrypt"] == nil {
+ return r, nil
+ }
+ err = r.initEncrypt("")
+ if err == nil {
+ return r, nil
+ }
+ if pw == nil || err != ErrInvalidPassword {
+ return nil, err
+ }
+ for {
+ next := pw()
+ if next == "" {
+ break
+ }
+ if r.initEncrypt(next) == nil {
+ return r, nil
+ }
+ }
+ return nil, err
+}
+
+// Trailer returns the file's Trailer value.
+func (r *Reader) Trailer() Value {
+ return Value{r, r.trailerptr, r.trailer}
+}
+
+func readXref(r *Reader, b *buffer) ([]xref, objptr, dict, error) {
+ tok := b.readToken()
+ if tok == keyword("xref") {
+ return readXrefTable(r, b)
+ }
+ if _, ok := tok.(int64); ok {
+ b.unreadToken(tok)
+ return readXrefStream(r, b)
+ }
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: cross-reference table not found: %v", tok)
+}
+
+func readXrefStream(r *Reader, b *buffer) ([]xref, objptr, dict, error) {
+ obj1 := b.readObject()
+ obj, ok := obj1.(objdef)
+ if !ok {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: cross-reference table not found: %v", objfmt(obj1))
+ }
+ strmptr := obj.ptr
+ strm, ok := obj.obj.(stream)
+ if !ok {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: cross-reference table not found: %v", objfmt(obj))
+ }
+ if strm.hdr["Type"] != name("XRef") {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: xref stream does not have type XRef")
+ }
+ size, ok := strm.hdr["Size"].(int64)
+ if !ok {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: xref stream missing Size")
+ }
+ table := make([]xref, size)
+
+ table, err := readXrefStreamData(r, strm, table, size)
+ if err != nil {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: %v", err)
+ }
+
+ for prevoff := strm.hdr["Prev"]; prevoff != nil; {
+ off, ok := prevoff.(int64)
+ if !ok {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: xref Prev is not integer: %v", prevoff)
+ }
+ b := newBuffer(io.NewSectionReader(r.f, off, r.end-off), off)
+ obj1 := b.readObject()
+ obj, ok := obj1.(objdef)
+ if !ok {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: xref prev stream not found: %v", objfmt(obj1))
+ }
+ prevstrm, ok := obj.obj.(stream)
+ if !ok {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: xref prev stream not found: %v", objfmt(obj))
+ }
+ prevoff = prevstrm.hdr["Prev"]
+ prev := Value{r, objptr{}, prevstrm}
+ if prev.Kind() != Stream {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: xref prev stream is not stream: %v", prev)
+ }
+ if prev.Key("Type").Name() != "XRef" {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: xref prev stream does not have type XRef")
+ }
+ psize := prev.Key("Size").Int64()
+ if psize > size {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: xref prev stream larger than last stream")
+ }
+ if table, err = readXrefStreamData(r, prev.data.(stream), table, psize); err != nil {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: reading xref prev stream: %v", err)
+ }
+ }
+
+ return table, strmptr, strm.hdr, nil
+}
+
+func readXrefStreamData(r *Reader, strm stream, table []xref, size int64) ([]xref, error) {
+ index, _ := strm.hdr["Index"].(array)
+ if index == nil {
+ index = array{int64(0), size}
+ }
+ if len(index)%2 != 0 {
+ return nil, fmt.Errorf("invalid Index array %v", objfmt(index))
+ }
+ ww, ok := strm.hdr["W"].(array)
+ if !ok {
+ return nil, fmt.Errorf("xref stream missing W array")
+ }
+
+ var w []int
+ for _, x := range ww {
+ i, ok := x.(int64)
+ if !ok || int64(int(i)) != i {
+ return nil, fmt.Errorf("invalid W array %v", objfmt(ww))
+ }
+ w = append(w, int(i))
+ }
+ if len(w) < 3 {
+ return nil, fmt.Errorf("invalid W array %v", objfmt(ww))
+ }
+
+ v := Value{r, objptr{}, strm}
+ wtotal := 0
+ for _, wid := range w {
+ wtotal += wid
+ }
+ buf := make([]byte, wtotal)
+ data := v.Reader()
+ for len(index) > 0 {
+ start, ok1 := index[0].(int64)
+ n, ok2 := index[1].(int64)
+ if !ok1 || !ok2 {
+ return nil, fmt.Errorf("malformed Index pair %v %v %T %T", objfmt(index[0]), objfmt(index[1]), index[0], index[1])
+ }
+ index = index[2:]
+ for i := 0; i < int(n); i++ {
+ _, err := io.ReadFull(data, buf)
+ if err != nil {
+ return nil, fmt.Errorf("error reading xref stream: %v", err)
+ }
+ v1 := decodeInt(buf[0:w[0]])
+ if w[0] == 0 {
+ v1 = 1
+ }
+ v2 := decodeInt(buf[w[0] : w[0]+w[1]])
+ v3 := decodeInt(buf[w[0]+w[1] : w[0]+w[1]+w[2]])
+ x := int(start) + i
+ for cap(table) <= x {
+ table = append(table[:cap(table)], xref{})
+ }
+ if table[x].ptr != (objptr{}) {
+ continue
+ }
+ switch v1 {
+ case 0:
+ table[x] = xref{ptr: objptr{0, 65535}}
+ case 1:
+ table[x] = xref{ptr: objptr{uint32(x), uint16(v3)}, offset: int64(v2)}
+ case 2:
+ table[x] = xref{ptr: objptr{uint32(x), 0}, inStream: true, stream: objptr{uint32(v2), 0}, offset: int64(v3)}
+ default:
+ if DebugOn {
+ fmt.Printf("invalid xref stream type %d: %x\n", v1, buf)
+ }
+ }
+ }
+ }
+ return table, nil
+}
+
+func decodeInt(b []byte) int {
+ x := 0
+ for _, c := range b {
+ x = x<<8 | int(c)
+ }
+ return x
+}
+
+func readXrefTable(r *Reader, b *buffer) ([]xref, objptr, dict, error) {
+ var table []xref
+
+ table, err := readXrefTableData(b, table)
+ if err != nil {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: %v", err)
+ }
+
+ trailer, ok := b.readObject().(dict)
+ if !ok {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: xref table not followed by trailer dictionary")
+ }
+
+ for prevoff := trailer["Prev"]; prevoff != nil; {
+ off, ok := prevoff.(int64)
+ if !ok {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: xref Prev is not integer: %v", prevoff)
+ }
+ b := newBuffer(io.NewSectionReader(r.f, off, r.end-off), off)
+ tok := b.readToken()
+ if tok != keyword("xref") {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: xref Prev does not point to xref")
+ }
+ table, err = readXrefTableData(b, table)
+ if err != nil {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: %v", err)
+ }
+
+ trailer, ok := b.readObject().(dict)
+ if !ok {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: xref Prev table not followed by trailer dictionary")
+ }
+ prevoff = trailer["Prev"]
+ }
+
+ size, ok := trailer[name("Size")].(int64)
+ if !ok {
+ return nil, objptr{}, nil, fmt.Errorf("malformed PDF: trailer missing /Size entry")
+ }
+
+ if size < int64(len(table)) {
+ table = table[:size]
+ }
+
+ return table, objptr{}, trailer, nil
+}
+
+func readXrefTableData(b *buffer, table []xref) ([]xref, error) {
+ for {
+ tok := b.readToken()
+ if tok == keyword("trailer") {
+ break
+ }
+ start, ok1 := tok.(int64)
+ n, ok2 := b.readToken().(int64)
+ if !ok1 || !ok2 {
+ return nil, fmt.Errorf("malformed xref table")
+ }
+ for i := 0; i < int(n); i++ {
+ off, ok1 := b.readToken().(int64)
+ gen, ok2 := b.readToken().(int64)
+ alloc, ok3 := b.readToken().(keyword)
+ if !ok1 || !ok2 || !ok3 || alloc != keyword("f") && alloc != keyword("n") {
+ return nil, fmt.Errorf("malformed xref table")
+ }
+ x := int(start) + i
+ for cap(table) <= x {
+ table = append(table[:cap(table)], xref{})
+ }
+ if len(table) <= x {
+ table = table[:x+1]
+ }
+ if alloc == "n" && table[x].offset == 0 {
+ table[x] = xref{ptr: objptr{uint32(x), uint16(gen)}, offset: int64(off)}
+ }
+ }
+ }
+ return table, nil
+}
+
+func findLastLine(buf []byte, s string) int {
+ bs := []byte(s)
+ max := len(buf)
+ for {
+ i := bytes.LastIndex(buf[:max], bs)
+ if i <= 0 || i+len(bs) >= len(buf) {
+ return -1
+ }
+ if (buf[i-1] == '\n' || buf[i-1] == '\r') && (buf[i+len(bs)] == '\n' || buf[i+len(bs)] == '\r') {
+ return i
+ }
+ max = i
+ }
+}
+
+// A Value is a single PDF value, such as an integer, dictionary, or array.
+// The zero Value is a PDF null (Kind() == Null, IsNull() == true).
+type Value struct {
+ r *Reader
+ ptr objptr
+ data interface{}
+}
+
+// IsNull reports whether the value is a null. It is equivalent to Kind() == Null.
+func (v Value) IsNull() bool {
+ return v.data == nil
+}
+
+// A ValueKind specifies the kind of data underlying a Value.
+type ValueKind int
+
+// The PDF value kinds.
+const (
+ Null ValueKind = iota
+ Bool
+ Integer
+ Real
+ String
+ Name
+ Dict
+ Array
+ Stream
+)
+
+// Kind reports the kind of value underlying v.
+func (v Value) Kind() ValueKind {
+ switch v.data.(type) {
+ default:
+ return Null
+ case bool:
+ return Bool
+ case int64:
+ return Integer
+ case float64:
+ return Real
+ case string:
+ return String
+ case name:
+ return Name
+ case dict:
+ return Dict
+ case array:
+ return Array
+ case stream:
+ return Stream
+ }
+}
+
+// String returns a textual representation of the value v.
+// Note that String is not the accessor for values with Kind() == String.
+// To access such values, see RawString, Text, and TextFromUTF16.
+func (v Value) String() string {
+ return objfmt(v.data)
+}
+
+func objfmt(x interface{}) string {
+ switch x := x.(type) {
+ default:
+ return fmt.Sprint(x)
+ case string:
+ if isPDFDocEncoded(x) {
+ return strconv.Quote(pdfDocDecode(x))
+ }
+ if isUTF16(x) {
+ return strconv.Quote(utf16Decode(x[2:]))
+ }
+ return strconv.Quote(x)
+ case name:
+ return "/" + string(x)
+ case dict:
+ var keys []string
+ for k := range x {
+ keys = append(keys, string(k))
+ }
+ sort.Strings(keys)
+ var buf bytes.Buffer
+ buf.WriteString("<<")
+ for i, k := range keys {
+ elem := x[name(k)]
+ if i > 0 {
+ buf.WriteString(" ")
+ }
+ buf.WriteString("/")
+ buf.WriteString(k)
+ buf.WriteString(" ")
+ buf.WriteString(objfmt(elem))
+ }
+ buf.WriteString(">>")
+ return buf.String()
+
+ case array:
+ var buf bytes.Buffer
+ buf.WriteString("[")
+ for i, elem := range x {
+ if i > 0 {
+ buf.WriteString(" ")
+ }
+ buf.WriteString(objfmt(elem))
+ }
+ buf.WriteString("]")
+ return buf.String()
+
+ case stream:
+ return fmt.Sprintf("%v@%d", objfmt(x.hdr), x.offset)
+
+ case objptr:
+ return fmt.Sprintf("%d %d R", x.id, x.gen)
+
+ case objdef:
+ return fmt.Sprintf("{%d %d obj}%v", x.ptr.id, x.ptr.gen, objfmt(x.obj))
+ }
+}
+
+// Bool returns v's boolean value.
+// If v.Kind() != Bool, Bool returns false.
+func (v Value) Bool() bool {
+ x, ok := v.data.(bool)
+ if !ok {
+ return false
+ }
+ return x
+}
+
+// Int64 returns v's int64 value.
+// If v.Kind() != Integer, Int64 returns 0.
+func (v Value) Int64() int64 {
+ x, ok := v.data.(int64)
+ if !ok {
+ return 0
+ }
+ return x
+}
+
+// Float64 returns v's float64 value, converting from integer if necessary.
+// If v.Kind() != Real and v.Kind() != Integer, Float64 returns 0.
+func (v Value) Float64() float64 {
+ x, ok := v.data.(float64)
+ if !ok {
+ x, ok := v.data.(int64)
+ if ok {
+ return float64(x)
+ }
+ return 0
+ }
+ return x
+}
+
+// RawString returns v's string value.
+// If v.Kind() != String, RawString returns the empty string.
+func (v Value) RawString() string {
+ x, ok := v.data.(string)
+ if !ok {
+ return ""
+ }
+ return x
+}
+
+// Text returns v's string value interpreted as a “text string” (defined in the PDF spec)
+// and converted to UTF-8.
+// If v.Kind() != String, Text returns the empty string.
+func (v Value) Text() string {
+ x, ok := v.data.(string)
+ if !ok {
+ return ""
+ }
+ if isPDFDocEncoded(x) {
+ return pdfDocDecode(x)
+ }
+ if isUTF16(x) {
+ return utf16Decode(x[2:])
+ }
+ return x
+}
+
+// TextFromUTF16 returns v's string value interpreted as big-endian UTF-16
+// and then converted to UTF-8.
+// If v.Kind() != String or if the data is not valid UTF-16, TextFromUTF16 returns
+// the empty string.
+func (v Value) TextFromUTF16() string {
+ x, ok := v.data.(string)
+ if !ok {
+ return ""
+ }
+ if len(x)%2 == 1 {
+ return ""
+ }
+ if x == "" {
+ return ""
+ }
+ return utf16Decode(x)
+}
+
+// Name returns v's name value.
+// If v.Kind() != Name, Name returns the empty string.
+// The returned name does not include the leading slash:
+// if v corresponds to the name written using the syntax /Helvetica,
+// Name() == "Helvetica".
+func (v Value) Name() string {
+ x, ok := v.data.(name)
+ if !ok {
+ return ""
+ }
+ return string(x)
+}
+
+// Key returns the value associated with the given name key in the dictionary v.
+// Like the result of the Name method, the key should not include a leading slash.
+// If v is a stream, Key applies to the stream's header dictionary.
+// If v.Kind() != Dict and v.Kind() != Stream, Key returns a null Value.
+func (v Value) Key(key string) Value {
+ x, ok := v.data.(dict)
+ if !ok {
+ strm, ok := v.data.(stream)
+ if !ok {
+ return Value{}
+ }
+ x = strm.hdr
+ }
+ return v.r.resolve(v.ptr, x[name(key)])
+}
+
+// Keys returns a sorted list of the keys in the dictionary v.
+// If v is a stream, Keys applies to the stream's header dictionary.
+// If v.Kind() != Dict and v.Kind() != Stream, Keys returns nil.
+func (v Value) Keys() []string {
+ x, ok := v.data.(dict)
+ if !ok {
+ strm, ok := v.data.(stream)
+ if !ok {
+ return nil
+ }
+ x = strm.hdr
+ }
+ keys := []string{} // not nil
+ for k := range x {
+ keys = append(keys, string(k))
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// Index returns the i'th element in the array v.
+// If v.Kind() != Array or if i is outside the array bounds,
+// Index returns a null Value.
+func (v Value) Index(i int) Value {
+ x, ok := v.data.(array)
+ if !ok || i < 0 || i >= len(x) {
+ return Value{}
+ }
+ return v.r.resolve(v.ptr, x[i])
+}
+
+// Len returns the length of the array v.
+// If v.Kind() != Array, Len returns 0.
+func (v Value) Len() int {
+ x, ok := v.data.(array)
+ if !ok {
+ return 0
+ }
+ return len(x)
+}
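+
+// Usage sketch: the accessors above compose naturally. Given a dictionary Value v
+// (for example a page object resolved elsewhere in this package):
+//
+// for _, k := range v.Keys() {
+// fmt.Println(k, v.Key(k))
+// }
+// kids := v.Key("Kids") // "Kids" is illustrative; missing keys yield a null Value
+// first := kids.Index(0) // null Value if kids is not an array or the index is out of range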
+
+func (r *Reader) resolve(parent objptr, x interface{}) Value {
+ if ptr, ok := x.(objptr); ok {
+ if ptr.id >= uint32(len(r.xref)) {
+ return Value{}
+ }
+ xref := r.xref[ptr.id]
+ if xref.ptr != ptr || !xref.inStream && xref.offset == 0 {
+ return Value{}
+ }
+ var obj object
+ if xref.inStream {
+ strm := r.resolve(parent, xref.stream)
+ Search:
+ for {
+ if strm.Kind() != Stream {
+ panic("not a stream")
+ }
+ if strm.Key("Type").Name() != "ObjStm" {
+ panic("not an object stream")
+ }
+ n := int(strm.Key("N").Int64())
+ first := strm.Key("First").Int64()
+ if first == 0 {
+ panic("missing First")
+ }
+ b := newBuffer(strm.Reader(), 0)
+ b.allowEOF = true
+ for i := 0; i < n; i++ {
+ id, _ := b.readToken().(int64)
+ off, _ := b.readToken().(int64)
+ if uint32(id) == ptr.id {
+ b.seekForward(first + off)
+ x = b.readObject()
+ break Search
+ }
+ }
+ ext := strm.Key("Extends")
+ if ext.Kind() != Stream {
+ panic("cannot find object in stream")
+ }
+ strm = ext
+ }
+ } else {
+ b := newBuffer(io.NewSectionReader(r.f, xref.offset, r.end-xref.offset), xref.offset)
+ b.key = r.key
+ b.useAES = r.useAES
+ obj = b.readObject()
+ def, ok := obj.(objdef)
+ if !ok {
+ panic(fmt.Errorf("loading %v: found %T instead of objdef", ptr, obj))
+ }
+ if def.ptr != ptr {
+ panic(fmt.Errorf("loading %v: found %v", ptr, def.ptr))
+ }
+ x = def.obj
+ }
+ parent = ptr
+ }
+
+ switch x := x.(type) {
+ case nil, bool, int64, float64, name, dict, array, stream:
+ return Value{r, parent, x}
+ case string:
+ return Value{r, parent, x}
+ default:
+ panic(fmt.Errorf("unexpected value type %T in resolve", x))
+ }
+}
+
+type errorReadCloser struct {
+ err error
+}
+
+func (e *errorReadCloser) Read([]byte) (int, error) {
+ return 0, e.err
+}
+
+func (e *errorReadCloser) Close() error {
+ return e.err
+}
+
+// Reader returns the data contained in the stream v.
+// If v.Kind() != Stream, Reader returns a ReadCloser that
+// responds to all reads with a “stream not present” error.
+func (v Value) Reader() io.ReadCloser {
+ x, ok := v.data.(stream)
+ if !ok {
+ return &errorReadCloser{fmt.Errorf("stream not present")}
+ }
+ var rd io.Reader
+ rd = io.NewSectionReader(v.r.f, x.offset, v.Key("Length").Int64())
+ if v.r.key != nil {
+ rd = decryptStream(v.r.key, v.r.useAES, x.ptr, rd)
+ }
+ filter := v.Key("Filter")
+ param := v.Key("DecodeParms")
+ switch filter.Kind() {
+ default:
+ panic(fmt.Errorf("unsupported filter %v", filter))
+ case Null:
+ // ok
+ case Name:
+ rd = applyFilter(rd, filter.Name(), param)
+ case Array:
+ for i := 0; i < filter.Len(); i++ {
+ rd = applyFilter(rd, filter.Index(i).Name(), param.Index(i))
+ }
+ }
+
+ return ioutil.NopCloser(rd)
+}
+
+func applyFilter(rd io.Reader, name string, param Value) io.Reader {
+ switch name {
+ default:
+ panic("unknown filter " + name)
+ case "FlateDecode":
+ zr, err := zlib.NewReader(rd)
+ if err != nil {
+ panic(err)
+ }
+ pred := param.Key("Predictor")
+ if pred.Kind() == Null {
+ return zr
+ }
+ columns := param.Key("Columns").Int64()
+ switch pred.Int64() {
+ default:
+ if DebugOn {
+ fmt.Println("unknown predictor", pred)
+ }
+ panic("pred")
+ case 12:
+ return &pngUpReader{r: zr, hist: make([]byte, 1+columns), tmp: make([]byte, 1+columns)}
+ }
+ case "ASCII85Decode":
+ cleanASCII85 := newAlphaReader(rd)
+ decoder := ascii85.NewDecoder(cleanASCII85)
+
+ switch param.Keys() {
+ default:
+ if DebugOn {
+ fmt.Println("param=", param)
+ }
+ panic("not expected DecodeParms for ascii85")
+ case nil:
+ return decoder
+ }
+ }
+}
+
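+// pngUpReader undoes the PNG "Up" predictor (Predictor 12 in the FlateDecode
+// parameters): each row starts with a filter-type byte (expected to be 2), and every
+// remaining byte is the stored byte plus the byte in the same column of the previous
+// decoded row, which is kept in hist.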
+type pngUpReader struct {
+ r io.Reader
+ hist []byte
+ tmp []byte
+ pend []byte
+}
+
+func (r *pngUpReader) Read(b []byte) (int, error) {
+ n := 0
+ for len(b) > 0 {
+ if len(r.pend) > 0 {
+ m := copy(b, r.pend)
+ n += m
+ b = b[m:]
+ r.pend = r.pend[m:]
+ continue
+ }
+ _, err := io.ReadFull(r.r, r.tmp)
+ if err != nil {
+ return n, err
+ }
+ if r.tmp[0] != 2 {
+ return n, fmt.Errorf("malformed PNG-Up encoding")
+ }
+ for i, b := range r.tmp {
+ r.hist[i] += b
+ }
+ r.pend = r.hist[1:]
+ }
+ return n, nil
+}
+
+var passwordPad = []byte{
+ 0x28, 0xBF, 0x4E, 0x5E, 0x4E, 0x75, 0x8A, 0x41, 0x64, 0x00, 0x4E, 0x56, 0xFF, 0xFA, 0x01, 0x08,
+ 0x2E, 0x2E, 0x00, 0xB6, 0xD0, 0x68, 0x3E, 0x80, 0x2F, 0x0C, 0xA9, 0xFE, 0x64, 0x53, 0x69, 0x7A,
+}
+
+func (r *Reader) initEncrypt(password string) error {
+ // See PDF 32000-1:2008, §7.6.
+ encrypt, _ := r.resolve(objptr{}, r.trailer["Encrypt"]).data.(dict)
+ if encrypt["Filter"] != name("Standard") {
+ return fmt.Errorf("unsupported PDF: encryption filter %v", objfmt(encrypt["Filter"]))
+ }
+ n, _ := encrypt["Length"].(int64)
+ if n == 0 {
+ n = 40
+ }
+ if n%8 != 0 || n > 128 || n < 40 {
+ return fmt.Errorf("malformed PDF: %d-bit encryption key", n)
+ }
+ V, _ := encrypt["V"].(int64)
+ if V != 1 && V != 2 && (V != 4 || !okayV4(encrypt)) {
+ return fmt.Errorf("unsupported PDF: encryption version V=%d; %v", V, objfmt(encrypt))
+ }
+
+ ids, ok := r.trailer["ID"].(array)
+ if !ok || len(ids) < 1 {
+ return fmt.Errorf("malformed PDF: missing ID in trailer")
+ }
+ idstr, ok := ids[0].(string)
+ if !ok {
+ return fmt.Errorf("malformed PDF: missing ID in trailer")
+ }
+ ID := []byte(idstr)
+
+ R, _ := encrypt["R"].(int64)
+ if R < 2 {
+ return fmt.Errorf("malformed PDF: encryption revision R=%d", R)
+ }
+ if R > 4 {
+ return fmt.Errorf("unsupported PDF: encryption revision R=%d", R)
+ }
+ O, _ := encrypt["O"].(string)
+ U, _ := encrypt["U"].(string)
+ if len(O) != 32 || len(U) != 32 {
+ return fmt.Errorf("malformed PDF: missing O= or U= encryption parameters")
+ }
+ p, _ := encrypt["P"].(int64)
+ P := uint32(p)
+
+ // TODO: Password should be converted to Latin-1.
+ pw := []byte(password)
+ h := md5.New()
+ if len(pw) >= 32 {
+ h.Write(pw[:32])
+ } else {
+ h.Write(pw)
+ h.Write(passwordPad[:32-len(pw)])
+ }
+ h.Write([]byte(O))
+ h.Write([]byte{byte(P), byte(P >> 8), byte(P >> 16), byte(P >> 24)})
+ h.Write([]byte(ID))
+ key := h.Sum(nil)
+
+ if R >= 3 {
+ for i := 0; i < 50; i++ {
+ h.Reset()
+ h.Write(key[:n/8])
+ key = h.Sum(key[:0])
+ }
+ key = key[:n/8]
+ } else {
+ key = key[:40/8]
+ }
+
+ c, err := rc4.NewCipher(key)
+ if err != nil {
+ return fmt.Errorf("malformed PDF: invalid RC4 key: %v", err)
+ }
+
+ var u []byte
+ if R == 2 {
+ u = make([]byte, 32)
+ copy(u, passwordPad)
+ c.XORKeyStream(u, u)
+ } else {
+ h.Reset()
+ h.Write(passwordPad)
+ h.Write([]byte(ID))
+ u = h.Sum(nil)
+ c.XORKeyStream(u, u)
+
+ for i := 1; i <= 19; i++ {
+ key1 := make([]byte, len(key))
+ copy(key1, key)
+ for j := range key1 {
+ key1[j] ^= byte(i)
+ }
+ c, _ = rc4.NewCipher(key1)
+ c.XORKeyStream(u, u)
+ }
+ }
+
+ if !bytes.HasPrefix([]byte(U), u) {
+ return ErrInvalidPassword
+ }
+
+ r.key = key
+ r.useAES = V == 4
+
+ return nil
+}
+
+var ErrInvalidPassword = fmt.Errorf("encrypted PDF: invalid password")
+
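+// okayV4 reports whether a V=4 encryption dictionary describes a configuration this
+// reader supports: a single AESV2 crypt filter with a 128-bit key and the DocOpen
+// auth event, applied to both streams and strings.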
+func okayV4(encrypt dict) bool {
+ cf, ok := encrypt["CF"].(dict)
+ if !ok {
+ return false
+ }
+ stmf, ok := encrypt["StmF"].(name)
+ if !ok {
+ return false
+ }
+ strf, ok := encrypt["StrF"].(name)
+ if !ok {
+ return false
+ }
+ if stmf != strf {
+ return false
+ }
+ cfparam, ok := cf[stmf].(dict)
+ if !ok {
+ return false
+ }
+ if cfparam["AuthEvent"] != nil && cfparam["AuthEvent"] != name("DocOpen") {
+ return false
+ }
+ if cfparam["Length"] != nil && cfparam["Length"] != int64(16) {
+ return false
+ }
+ if cfparam["CFM"] != name("AESV2") {
+ return false
+ }
+ return true
+}
+
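+// cryptKey derives the per-object decryption key (PDF 32000-1:2008, §7.6.2):
+// MD5 of the file key, the low three bytes of the object number, the low two bytes
+// of the generation number, and, when AES is used, the constant salt "sAlT".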
+func cryptKey(key []byte, useAES bool, ptr objptr) []byte {
+ h := md5.New()
+ h.Write(key)
+ h.Write([]byte{byte(ptr.id), byte(ptr.id >> 8), byte(ptr.id >> 16), byte(ptr.gen), byte(ptr.gen >> 8)})
+ if useAES {
+ h.Write([]byte("sAlT"))
+ }
+ return h.Sum(nil)
+}
+
+func decryptString(key []byte, useAES bool, ptr objptr, x string) string {
+ key = cryptKey(key, useAES, ptr)
+ if useAES {
+ s := []byte(x)
+ if len(s) < aes.BlockSize {
+ panic("Encrypted text shorter that AES block size")
+ }
+
+ block, _ := aes.NewCipher(key)
+ iv := s[:aes.BlockSize]
+ s = s[aes.BlockSize:]
+
+ stream := cipher.NewCBCDecrypter(block, iv)
+ stream.CryptBlocks(s, s)
+ x = string(s)
+ } else {
+ c, _ := rc4.NewCipher(key)
+ data := []byte(x)
+ c.XORKeyStream(data, data)
+ x = string(data)
+ }
+ return x
+}
+
+func decryptStream(key []byte, useAES bool, ptr objptr, rd io.Reader) io.Reader {
+ key = cryptKey(key, useAES, ptr)
+ if useAES {
+ cb, err := aes.NewCipher(key)
+ if err != nil {
+ panic("AES: " + err.Error())
+ }
+ iv := make([]byte, 16)
+ io.ReadFull(rd, iv)
+ cbc := cipher.NewCBCDecrypter(cb, iv)
+ rd = &cbcReader{cbc: cbc, rd: rd, buf: make([]byte, 16)}
+ } else {
+ c, _ := rc4.NewCipher(key)
+ rd = &cipher.StreamReader{c, rd}
+ }
+ return rd
+}
+
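+// cbcReader decrypts an AES-CBC stream lazily, one 16-byte block at a time,
+// handing out plaintext from pend until the buffered block is exhausted.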
+type cbcReader struct {
+ cbc cipher.BlockMode
+ rd io.Reader
+ buf []byte
+ pend []byte
+}
+
+func (r *cbcReader) Read(b []byte) (n int, err error) {
+ if len(r.pend) == 0 {
+ _, err = io.ReadFull(r.rd, r.buf)
+ if err != nil {
+ return 0, err
+ }
+ r.cbc.CryptBlocks(r.buf, r.buf)
+ r.pend = r.buf
+ }
+ n = copy(b, r.pend)
+ r.pend = r.pend[n:]
+ return n, nil
+}
diff --git a/godo/office/pdf/text.go b/godo/office/pdf/text.go
new file mode 100644
index 0000000..c377fb0
--- /dev/null
+++ b/godo/office/pdf/text.go
@@ -0,0 +1,154 @@
+package pdf
+
+import (
+ "unicode"
+ "unicode/utf16"
+)
+
+const noRune = unicode.ReplacementChar
+
+func isPDFDocEncoded(s string) bool {
+ if isUTF16(s) {
+ return false
+ }
+ for i := 0; i < len(s); i++ {
+ if pdfDocEncoding[s[i]] == noRune {
+ return false
+ }
+ }
+ return true
+}
+
+func pdfDocDecode(s string) string {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= 0x80 || pdfDocEncoding[s[i]] != rune(s[i]) {
+ goto Decode
+ }
+ }
+ return s
+
+Decode:
+ r := make([]rune, len(s))
+ for i := 0; i < len(s); i++ {
+ r[i] = pdfDocEncoding[s[i]]
+ }
+ return string(r)
+}
+
+func isUTF16(s string) bool {
+ return len(s) >= 2 && s[0] == 0xfe && s[1] == 0xff && len(s)%2 == 0
+}
+
+func utf16Decode(s string) string {
+ var u []uint16
+ for i := 0; i < len(s); i += 2 {
+ u = append(u, uint16(s[i])<<8|uint16(s[i+1]))
+ }
+ return string(utf16.Decode(u))
+}
+
+// See PDF 32000-1:2008, Table D.2
+var pdfDocEncoding = [256]rune{
+ noRune, noRune, noRune, noRune, noRune, noRune, noRune, noRune,
+ noRune, 0x0009, 0x000a, noRune, noRune, 0x000d, noRune, noRune,
+ noRune, noRune, noRune, noRune, noRune, noRune, noRune, noRune,
+ 0x02d8, 0x02c7, 0x02c6, 0x02d9, 0x02dd, 0x02db, 0x02da, 0x02dc,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, noRune,
+ 0x2022, 0x2020, 0x2021, 0x2026, 0x2014, 0x2013, 0x0192, 0x2044,
+ 0x2039, 0x203a, 0x2212, 0x2030, 0x201e, 0x201c, 0x201d, 0x2018,
+ 0x2019, 0x201a, 0x2122, 0xfb01, 0xfb02, 0x0141, 0x0152, 0x0160,
+ 0x0178, 0x017d, 0x0131, 0x0142, 0x0153, 0x0161, 0x017e, noRune,
+ 0x20ac, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7,
+ 0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, noRune, 0x00ae, 0x00af,
+ 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7,
+ 0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
+ 0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
+ 0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
+ 0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
+ 0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
+ 0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7,
+ 0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
+ 0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
+ 0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff,
+}
+
+var winAnsiEncoding = [256]rune{
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
+ 0x20ac, noRune, 0x201a, 0x0192, 0x201e, 0x2026, 0x2020, 0x2021,
+ 0x02c6, 0x2030, 0x0160, 0x2039, 0x0152, noRune, 0x017d, noRune,
+ noRune, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014,
+ 0x02dc, 0x2122, 0x0161, 0x203a, 0x0153, noRune, 0x017e, 0x0178,
+ 0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7,
+ 0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
+ 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7,
+ 0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
+ 0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
+ 0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
+ 0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
+ 0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
+ 0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7,
+ 0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
+ 0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
+ 0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff,
+}
+
+var macRomanEncoding = [256]rune{
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
+ 0x00c4, 0x00c5, 0x00c7, 0x00c9, 0x00d1, 0x00d6, 0x00dc, 0x00e1,
+ 0x00e0, 0x00e2, 0x00e4, 0x00e3, 0x00e5, 0x00e7, 0x00e9, 0x00e8,
+ 0x00ea, 0x00eb, 0x00ed, 0x00ec, 0x00ee, 0x00ef, 0x00f1, 0x00f3,
+ 0x00f2, 0x00f4, 0x00f6, 0x00f5, 0x00fa, 0x00f9, 0x00fb, 0x00fc,
+ 0x2020, 0x00b0, 0x00a2, 0x00a3, 0x00a7, 0x2022, 0x00b6, 0x00df,
+ 0x00ae, 0x00a9, 0x2122, 0x00b4, 0x00a8, 0x2260, 0x00c6, 0x00d8,
+ 0x221e, 0x00b1, 0x2264, 0x2265, 0x00a5, 0x00b5, 0x2202, 0x2211,
+ 0x220f, 0x03c0, 0x222b, 0x00aa, 0x00ba, 0x03a9, 0x00e6, 0x00f8,
+ 0x00bf, 0x00a1, 0x00ac, 0x221a, 0x0192, 0x2248, 0x2206, 0x00ab,
+ 0x00bb, 0x2026, 0x00a0, 0x00c0, 0x00c3, 0x00d5, 0x0152, 0x0153,
+ 0x2013, 0x2014, 0x201c, 0x201d, 0x2018, 0x2019, 0x00f7, 0x25ca,
+ 0x00ff, 0x0178, 0x2044, 0x20ac, 0x2039, 0x203a, 0xfb01, 0xfb02,
+ 0x2021, 0x00b7, 0x201a, 0x201e, 0x2030, 0x00c2, 0x00ca, 0x00c1,
+ 0x00cb, 0x00c8, 0x00cd, 0x00ce, 0x00cf, 0x00cc, 0x00d3, 0x00d4,
+ 0xf8ff, 0x00d2, 0x00da, 0x00db, 0x00d9, 0x0131, 0x02c6, 0x02dc,
+ 0x00af, 0x02d8, 0x02d9, 0x02da, 0x00b8, 0x02dd, 0x02db, 0x02c7,
+}
diff --git a/godo/office/ppt.go b/godo/office/ppt.go
new file mode 100644
index 0000000..9a97aa7
--- /dev/null
+++ b/godo/office/ppt.go
@@ -0,0 +1,358 @@
+package office
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/richardlehane/mscfb"
+ "golang.org/x/text/encoding"
+ "golang.org/x/text/encoding/unicode"
+ "golang.org/x/text/transform"
+)
+
+// skipped metadata or non-readable records in slide container
+var slideSkippedRecordsTypes = []recordType{
+ recordTypeExternalObjectList,
+ recordTypeEnvironment,
+ recordTypeSoundCollection,
+ recordTypeDrawingGroup,
+ recordTypeSlideListWithText,
+ recordTypeList,
+ recordTypeHeadersFooters,
+ recordTypeHeadersFooters,
+}
+
+// skipped metadata or non-readable records in drawing container
+var drawingSkippedRecordsTypes = []recordType{
+ recordTypeSlideShowSlideInfoAtom,
+ recordTypeHeadersFooters,
+ recordTypeRoundTripSlideSyncInfo12,
+}
+
+const (
+ userPersistIDRefOffset = 16
+)
+
+// ExtractPPTText parses the PPT file represented by Reader r and extracts its text.
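+// A minimal usage sketch (error handling elided; the file name is illustrative):
+//
+// f, _ := os.Open("deck.ppt")
+// defer f.Close()
+// text, err := ExtractPPTText(f)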
+func ExtractPPTText(r io.Reader) (string, error) {
+ ra := ToReaderAt(r)
+
+ d, err := mscfb.New(ra)
+ if err != nil {
+ return "", err
+ }
+ currentUser, pptDocument := getCurrentUserAndPPTDoc(d)
+ if err := isValidPPT(currentUser, pptDocument); err != nil {
+ return "", err
+ }
+ offsetPersistDirectory, liveRecord, err := getUserEditAtomsData(currentUser, pptDocument)
+ if err != nil {
+ return "", err
+ }
+ persistDirEntries, err := getPersistDirectoryEntries(pptDocument, offsetPersistDirectory)
+ if err != nil {
+ return "", err
+ }
+
+ // get DocumentContainer reference
+ docPersistIDRef := liveRecord.LongAt(userPersistIDRefOffset)
+ documentContainer, err := readRecord(pptDocument, persistDirEntries[docPersistIDRef], recordTypeDocument)
+ if err != nil {
+ return "", err
+ }
+
+ return readSlides(documentContainer, pptDocument, persistDirEntries)
+}
+
+// getCurrentUserAndPPTDoc extracts necessary mscfb files from PPT file
+func getCurrentUserAndPPTDoc(r *mscfb.Reader) (currentUser *mscfb.File, pptDocument *mscfb.File) {
+ for _, f := range r.File {
+ switch f.Name {
+ case "Current User":
+ currentUser = f
+ case "PowerPoint Document":
+ pptDocument = f
+ }
+ }
+ return currentUser, pptDocument
+}
+
+// isValidPPT checks if provided file is valid, meaning
+// it has both "Current User" and "PowerPoint Document" files
+// and "Current User"'s CurrentUserAtom record has valid header token
+func isValidPPT(currentUser, pptDocument *mscfb.File) error {
+ const (
+ headerTokenOffset = 12
+ encryptedDocumentToken = 0xF3D1C4DF
+ plainDocumentToken = 0xE391C05F
+ )
+
+ if currentUser == nil || pptDocument == nil {
+ return fmt.Errorf(".ppt file must contain \"Current User\" and \"PowerPoint Document\" streams")
+ }
+ var b [4]byte
+ _, err := currentUser.ReadAt(b[:], headerTokenOffset)
+ if err != nil {
+ return err
+ }
+ headerToken := binary.LittleEndian.Uint32(b[:])
+ if headerToken != plainDocumentToken && headerToken != encryptedDocumentToken {
+ return fmt.Errorf("invalid UserEditAtom header token %X", headerToken)
+ }
+ return nil
+}
+
+// getUserEditAtomsData extracts "live record" and persist directory offsets
+// according to section 2.1.2 of specification (https://msopenspecs.azureedge.net/files/MS-PPT/%5bMS-PPT%5d-210422.pdf)
+func getUserEditAtomsData(currentUser, pptDocument *mscfb.File) (
+ persistDirectoryOffsets []int64,
+ liveRecord record,
+ err error,
+) {
+ const (
+ offsetLastEditInitialPosition = 16
+ offsetLastEditPosition = 8
+ persistDirectoryOffsetPosition = 12
+ )
+ var b [4]byte
+ _, err = currentUser.ReadAt(b[:], offsetLastEditInitialPosition)
+ if err != nil {
+ return nil, record{}, err
+ }
+ offsetLastEdit := binary.LittleEndian.Uint32(b[:])
+
+ for {
+ liveRecord, err = readRecord(pptDocument, int64(offsetLastEdit), recordTypeUserEditAtom)
+ if err != nil {
+ if errors.Is(err, errMismatchRecordType) {
+ break
+ }
+ return nil, record{}, err
+ }
+ persistDirectoryOffsets = append(
+ persistDirectoryOffsets,
+ int64(liveRecord.LongAt(persistDirectoryOffsetPosition)),
+ )
+ offsetLastEdit = liveRecord.LongAt(offsetLastEditPosition)
+ if offsetLastEdit == 0 {
+ break
+ }
+ }
+
+ return persistDirectoryOffsets, liveRecord, err
+}
+
+// getPersistDirectoryEntries transforms offsets into persists directory identifiers and persist offsets according
+// to section 2.1.2 of specification (https://msopenspecs.azureedge.net/files/MS-PPT/%5bMS-PPT%5d-210422.pdf)
+func getPersistDirectoryEntries(pptDocument *mscfb.File, offsets []int64) (map[uint32]int64, error) {
+ const persistOffsetEntrySize = 4
+
+ persistDirEntries := make(map[uint32]int64)
+ for i := len(offsets) - 1; i >= 0; i-- {
+ rgPersistDirEntry, err := readRecord(pptDocument, offsets[i], recordTypePersistDirectoryAtom)
+ if err != nil {
+ return nil, err
+ }
+
+ rgPersistDirEntryData := rgPersistDirEntry.recordData
+
+ for j := 0; j < len(rgPersistDirEntryData); {
+ persist := rgPersistDirEntryData.LongAt(j)
+ persistID := persist & 0x000FFFFF
+ cPersist := ((persist & 0xFFF00000) >> 20) & 0x00000FFF
+ j += 4
+
+ for k := uint32(0); k < cPersist; k++ {
+ persistDirEntries[persistID+k] = int64(rgPersistDirEntryData.LongAt(j + int(k)*persistOffsetEntrySize))
+ }
+ j += int(cPersist * persistOffsetEntrySize)
+ }
+ }
+ return persistDirEntries, nil
+}
+
+// readSlides reads text from slides of given DocumentContainer
+func readSlides(documentContainer, pptDocument io.ReaderAt, persistDirEntries map[uint32]int64) (string, error) {
+ const slideSkipInitialOffset = 48
+ offset, err := skipRecords(documentContainer, slideSkipInitialOffset, slideSkippedRecordsTypes)
+ if err != nil {
+ return "", err
+ }
+ slideList, err := readRecord(documentContainer, offset, recordTypeSlideListWithText)
+ if err != nil {
+ return "", err
+ }
+
+ utf16Decoder := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM).NewDecoder()
+
+ var out strings.Builder
+ n := len(slideList.Data())
+ for i := 0; i < n; {
+ block, err := readRecord(slideList, int64(i), recordTypeUnspecified)
+ if err != nil {
+ return "", err
+ }
+ switch block.Type() {
+ case recordTypeSlidePersistAtom:
+ err = readTextFromSlidePersistAtom(block, pptDocument, persistDirEntries, &out, utf16Decoder)
+ case recordTypeTextCharsAtom:
+ err = readTextFromTextCharsAtom(block, &out, utf16Decoder)
+ case recordTypeTextBytesAtom:
+ err = readTextFromTextBytesAtom(block, &out, utf16Decoder)
+ }
+ if err != nil {
+ return "", err
+ }
+
+ i += len(block.Data()) + 8
+ }
+
+ return out.String(), nil
+}
+
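+// readTextFromSlidePersistAtom resolves the slide referenced by a SlidePersistAtom
+// through the persist directory and scans its Drawing container for TextCharsAtom and
+// TextBytesAtom records, appending their decoded text to out.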
+func readTextFromSlidePersistAtom(
+ block record,
+ pptDocument io.ReaderAt,
+ persistDirEntries map[uint32]int64,
+ out *strings.Builder,
+ utf16Decoder *encoding.Decoder,
+) error {
+ const (
+ slidePersistAtomSkipInitialOffset = 32
+ headerRecordTypeOffset = 2
+ )
+
+ persistDirID := block.LongAt(0)
+ // extract slide from persist directory
+ slide, err := readRecord(pptDocument, persistDirEntries[persistDirID], recordTypeSlide)
+ if err != nil {
+ return err
+ }
+ // skip metadata
+ offset, err := skipRecords(slide, slidePersistAtomSkipInitialOffset, drawingSkippedRecordsTypes)
+ if err != nil {
+ return err
+ }
+
+ drawing, err := readRecord(slide, offset, recordTypeDrawing)
+ if err != nil {
+ return err
+ }
+ drawingBytes := drawing.Data()
+ from := 0
+ for {
+ // instead of fully parsing the binary drawing container, search for text records directly
+ pocketIdx := matchPocket(drawingBytes, from)
+ if pocketIdx == -1 {
+ break
+ }
+ // check whether this is really a text record: the recType bytes must be preceded by the
+ // 2-byte recVer/recInstance field of the record header, which is zero for these text atoms
+ if pocketIdx >= 2 && bytes.Equal(drawingBytes[pocketIdx-headerRecordTypeOffset:pocketIdx], []byte{0x00, 0x00}) {
+ var rec record
+ if drawingBytes[pocketIdx] == recordTypeTextBytesAtom.LowerPart() {
+ rec, err = readRecord(drawing, int64(pocketIdx-headerRecordTypeOffset), recordTypeTextBytesAtom)
+ if err != nil {
+ return err
+ }
+ err = readTextFromTextBytesAtom(rec, out, utf16Decoder)
+ } else {
+ rec, err = readRecord(drawing, int64(pocketIdx-headerRecordTypeOffset), recordTypeTextCharsAtom)
+ if err != nil {
+ return err
+ }
+ err = readTextFromTextCharsAtom(rec, out, utf16Decoder)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ from = pocketIdx + 2
+ }
+ return nil
+}
+
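+// matchPocket returns the absolute index of the next byte in data (at or after from)
+// that looks like the low byte of a TextCharsAtom or TextBytesAtom record type
+// followed by 0x0F, or -1 if no such position exists.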
+func matchPocket(data []byte, from int) int {
+ data = data[from:]
+ n := len(data)
+ for i := 0; i < n; i++ {
+ switch data[i] {
+ case recordTypeTextCharsAtom.LowerPart(), recordTypeTextBytesAtom.LowerPart():
+ if i < n-1 && data[i+1] == 0x0F {
+ return i + from
+ }
+ }
+ }
+ return -1
+}
+
+// readTextFromTextCharsAtom simply transforms UTF-16LE data into UTF-8 data
+func readTextFromTextCharsAtom(atom record, out *strings.Builder, dec *encoding.Decoder) error {
+ dec.Reset()
+ transformed, err := dec.Bytes(atom.Data())
+ if err != nil {
+ return err
+ }
+ out.Write(transformed)
+ out.WriteByte(' ')
+ return nil
+}
+
+func readTextFromTextBytesAtom(atom record, out *strings.Builder, dec *encoding.Decoder) error {
+ dec.Reset()
+ transformed, err := decodeTextBytesAtom(atom.Data(), dec)
+ if err != nil {
+ return err
+ }
+ out.Write(transformed)
+ out.WriteByte(' ')
+ return nil
+}
+
+// decodeTextBytesAtom converts TextBytesAtom data, an array of bytes holding the low
+// bytes of UTF-16 characters, into UTF-8.
+func decodeTextBytesAtom(data []byte, dec *encoding.Decoder) ([]byte, error) {
+ var (
+ // buffer for UTF-16 char
+ buf [2]byte
+ err error
+ )
+ result := make([]byte, 0, len(data))
+ for i := range data {
+ // filling upper part of character with zero
+ clear(buf[:])
+ // fill lower part with byte
+ buf[0] = data[i]
+
+ // transform single UTF-16 char into UTF-8 rune and append it into result
+ result, _, err = transform.Append(dec, result, buf[:])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return result, nil
+}
+
+// skipRecords reads headers and skips data of records of provided types
+func skipRecords(r io.ReaderAt, initialOffset int64, skippedRecordsTypes []recordType) (int64, error) {
+ offset := initialOffset
+
+ for i := range skippedRecordsTypes {
+ rec, err := readRecordHeaderOnly(r, offset, skippedRecordsTypes[i])
+ if err != nil {
+ if errors.Is(err, errMismatchRecordType) {
+ continue
+ }
+ return 0, err
+ }
+ offset += int64(rec.Length() + headerSize)
+ }
+
+ return offset, nil
+}
diff --git a/godo/office/pptx.go b/godo/office/pptx.go
new file mode 100644
index 0000000..5732bb3
--- /dev/null
+++ b/godo/office/pptx.go
@@ -0,0 +1,208 @@
+package office
+
+import (
+ "archive/zip"
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type PowerPoint struct {
+ Files []*zip.File
+ Slides map[string]string
+ NotesSlides map[string]string
+ Themes map[string]string
+ Images map[string]string
+ Presentation string
+}
+
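+// ReadPowerPoint opens a .pptx archive and loads its slides, notes, themes, images and
+// presentation XML into memory as strings keyed by their paths inside the archive.
+// A minimal usage sketch (error handling elided; file names are illustrative):
+//
+// p, _ := ReadPowerPoint("deck.pptx")
+// p.ReplaceSlideContent("old", "new", -1)
+// _ = p.WriteToFile("deck-edited.pptx")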
+func ReadPowerPoint(path string) (*PowerPoint, error) {
+ var p PowerPoint
+ p.Slides = make(map[string]string)
+ p.NotesSlides = make(map[string]string)
+ p.Themes = make(map[string]string)
+ p.Images = make(map[string]string)
+ f, err := zip.OpenReader(path)
+ if err != nil {
+ return nil, fmt.Errorf("Error opening file" + err.Error())
+ }
+ p.Files = f.File
+
+ for _, file := range p.Files {
+ if strings.Contains(file.Name, "ppt/slides/slide") {
+ slideOpen, _ := file.Open()
+ p.Slides[file.Name] = string(readCloserToByte(slideOpen))
+ }
+ if strings.Contains(file.Name, "ppt/notesSlides/notesSlide") {
+ notesSlideOpen, _ := file.Open()
+ p.NotesSlides[file.Name] = string(readCloserToByte(notesSlideOpen))
+ }
+ if strings.Contains(file.Name, "ppt/theme/theme") {
+ themeOpen, _ := file.Open()
+ p.Themes[file.Name] = string(readCloserToByte(themeOpen))
+ }
+ if strings.Contains(file.Name, "ppt/media/image") {
+ imageOpen, _ := file.Open()
+ p.Images[file.Name] = string(readCloserToByte(imageOpen))
+ }
+ if strings.Contains(file.Name, "ppt/presentation.xml") {
+ presentationOpen, _ := file.Open()
+ p.Presentation = string(readCloserToByte(presentationOpen))
+ }
+ }
+
+ return &p, nil
+}
+
+func (p *PowerPoint) GetSlidesContent() []string {
+ var slides []string
+ for _, slide := range p.Slides {
+ slides = append(slides, slide)
+ }
+ return slides
+}
+
+// DeletePassWord removes only the edit (modify) password from the presentation.
+func (p *PowerPoint) DeletePassWord() {
+ // assumed pattern: the edit password hash is stored in the self-closing
+ // <p:modifyVerifier .../> element of ppt/presentation.xml
+ reg := regexp.MustCompile(`<p:modifyVerifier[^>]*/>`)
+ p.Presentation = reg.ReplaceAllString(p.Presentation, "")
+}
+
+func (p *PowerPoint) GetSlideCount() int {
+ return len(p.Slides)
+}
+
+func (p *PowerPoint) GetNotesSlideCount() int {
+ return len(p.NotesSlides)
+}
+
+func (p *PowerPoint) GetThemeCount() int {
+ return len(p.Themes)
+}
+
+func (p *PowerPoint) FindSlideString(findString string) []int {
+ var nums []int
+ reg := regexp.MustCompile(`\d+`)
+ for k, v := range p.Slides {
+ if strings.Contains(v, findString) {
+ num := reg.FindString(k)
+ n, _ := strconv.Atoi(num)
+ nums = append(nums, n)
+ }
+ }
+ return nums
+}
+
+func (p *PowerPoint) DeleteSlide(index int) error {
+ if index <= 0 {
+ index = len(p.Slides)
+ }
+ if index > len(p.Slides) {
+ return fmt.Errorf("index out of range")
+ }
+
+ p.Slides[fmt.Sprintf("ppt/slides/slide%d.xml", index)] = " "
+ for {
+ if index == len(p.Slides) {
+ break
+ } else {
+ p.Slides[fmt.Sprintf("ppt/slides/slide%d.xml", index)], p.Slides[fmt.Sprintf("ppt/slides/slide%d.xml", index+1)] = p.Slides[fmt.Sprintf("ppt/slides/slide%d.xml", index+1)], p.Slides[fmt.Sprintf("ppt/slides/slide%d.xml", index)]
+ index++
+ }
+ }
+ // Deleting the corresponding entries from p.Slides, p.NotesSlides and p.Files does remove the
+ // page, but the resulting pptx can no longer be opened. For now the slide to delete is blanked
+ // out and shifted to the last position instead.
+ //delete(p.Slides, fmt.Sprintf("ppt/slides/slide%d.xml", len(p.Slides)))
+ //delete(p.NotesSlides, fmt.Sprintf("ppt/notesSlides/notesSlide%d.xml", len(p.NotesSlides)))
+ //for k, v := range p.Files {
+ // if strings.Contains(v.Name, fmt.Sprintf("ppt/slides/slide%d.xml", len(p.Slides)+1)) {
+ // p.Files = append(p.Files[:k], p.Files[k+1:]...)
+ // }
+ // if strings.Contains(v.Name, fmt.Sprintf("ppt/notesSlides/notesSlide%d.xml", len(p.NotesSlides)+1)) {
+ // p.Files= append(p.Files[:k], p.Files[k+1:]...)
+ // }
+ //}
+
+ return nil
+}
+
+func (p *PowerPoint) ReplaceSlideContent(oldString string, newString string, num int) {
+ for k, v := range p.Slides {
+ p.Slides[k] = strings.Replace(v, oldString, newString, num)
+ }
+}
+
+func (p *PowerPoint) ReplaceNotesSlideContent(oldString string, newString string, num int) {
+ for k, v := range p.NotesSlides {
+ p.NotesSlides[k] = strings.Replace(v, oldString, newString, num)
+ }
+}
+
+func (p *PowerPoint) ReplaceThemeName(oldString string, newString string, num int) {
+ for k, v := range p.Themes {
+ p.Themes[k] = strings.Replace(v, oldString, newString, num)
+ }
+}
+
+func (p *PowerPoint) ReplaceImage(newImagePath string, index int) error {
+ if index > len(p.Images) {
+ return fmt.Errorf("index out of range")
+ }
+ newImageOpen, _ := os.ReadFile(newImagePath)
+ newImageStr := string(newImageOpen)
+ for k := range p.Images {
+ if strings.Contains(k, fmt.Sprintf("ppt/media/image%d.", index)) {
+ p.Images[k] = newImageStr
+ }
+ }
+ return nil
+}
+
+func (p *PowerPoint) WriteToFile(path string) (err error) {
+ var target *os.File
+ target, err = os.Create(path)
+ if err != nil {
+ return
+ }
+
+ defer target.Close()
+ err = p.Write(target)
+ return
+}
+
+func (p *PowerPoint) Write(ioWriter io.Writer) (err error) {
+ w := zip.NewWriter(ioWriter)
+ defer w.Close()
+ for _, file := range p.Files {
+ var writer io.Writer
+ var readCloser io.ReadCloser
+ writer, err = w.Create(file.Name)
+ if err != nil {
+ return err
+ }
+
+ if strings.Contains(file.Name, "ppt/slides/slide") && p.Slides[file.Name] != "" {
+ writer.Write([]byte(p.Slides[file.Name]))
+ } else if strings.Contains(file.Name, "ppt/notesSlides/notesSlide") && p.NotesSlides[file.Name] != "" {
+ writer.Write([]byte(p.NotesSlides[file.Name]))
+ } else if strings.Contains(file.Name, "ppt/theme/theme") && p.Themes[file.Name] != "" {
+ writer.Write([]byte(p.Themes[file.Name]))
+ } else if file.Name == "ppt/presentation.xml" {
+ writer.Write([]byte(p.Presentation))
+ } else if strings.Contains(file.Name, "ppt/media/image") && p.Images[file.Name] != "" {
+ writer.Write([]byte(p.Images[file.Name]))
+ } else {
+ readCloser, _ = file.Open()
+ writer.Write(readCloserToByte(readCloser))
+ }
+ }
+ return
+}
+func readCloserToByte(stream io.Reader) []byte {
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(stream)
+ return buf.Bytes()
+}
diff --git a/godo/office/reader.go b/godo/office/reader.go
new file mode 100644
index 0000000..5fbecf5
--- /dev/null
+++ b/godo/office/reader.go
@@ -0,0 +1,164 @@
+package office
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+ "slices"
+)
+
+const headerSize = 8
+
+// recordType is an enumeration that specifies the record type of an atom record or a container record
+// ([MS-PPT] 2.13.24 RecordType)
+type recordType uint16
+
+const (
+ recordTypeUnspecified recordType = 0
+ recordTypeDocument recordType = 0x03E8
+ recordTypeSlide recordType = 0x03EE
+ recordTypeEnvironment recordType = 0x03F2
+ recordTypeSlidePersistAtom recordType = 0x03F3
+ recordTypeSlideShowSlideInfoAtom recordType = 0x03F9
+ recordTypeExternalObjectList recordType = 0x0409
+ recordTypeDrawingGroup recordType = 0x040B
+ recordTypeDrawing recordType = 0x040C
+ recordTypeList recordType = 0x07D0
+ recordTypeSoundCollection recordType = 0x07E4
+ recordTypeTextCharsAtom recordType = 0x0FA0
+ recordTypeTextBytesAtom recordType = 0x0FA8
+ recordTypeHeadersFooters recordType = 0x0FD9
+ recordTypeSlideListWithText recordType = 0x0FF0
+ recordTypeUserEditAtom recordType = 0x0FF5
+ recordTypePersistDirectoryAtom recordType = 0x1772
+ recordTypeRoundTripSlideSyncInfo12 recordType = 0x3714
+)
+
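+// readerAtAdapter provides io.ReaderAt semantics on top of a plain io.Reader by
+// keeping everything read so far in an in-memory buffer.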
+type readerAtAdapter struct {
+ r io.Reader
+ readBytes []byte
+}
+
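+// ToReaderAt returns r itself when it already implements io.ReaderAt and otherwise
+// wraps it in a buffering adapter. The adapter is convenient for small documents;
+// for very large inputs it holds the entire read prefix in memory.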
+func ToReaderAt(r io.Reader) io.ReaderAt {
+ ra, ok := r.(io.ReaderAt)
+ if ok {
+ return ra
+ }
+ return &readerAtAdapter{
+ r: r,
+ }
+}
+
+func (r *readerAtAdapter) ReadAt(p []byte, off int64) (n int, err error) {
+ if int(off)+len(p) > len(r.readBytes) {
+ err := r.expandBuffer(int(off) + len(p))
+ if err != nil {
+ return 0, err
+ }
+ }
+ return bytesReaderAt(r.readBytes).ReadAt(p, off)
+}
+
+func (r *readerAtAdapter) expandBuffer(newSize int) error {
+ if cap(r.readBytes) < newSize {
+ r.readBytes = slices.Grow(r.readBytes, newSize-len(r.readBytes))
+ }
+
+ // io.ReadFull keeps the buffer and the reader position consistent even when the
+ // underlying reader returns short reads
+ newPart := r.readBytes[len(r.readBytes):newSize]
+ n, err := io.ReadFull(r.r, newPart)
+ switch {
+ case err == nil:
+ r.readBytes = r.readBytes[:newSize]
+ case errors.Is(err, io.EOF), errors.Is(err, io.ErrUnexpectedEOF):
+ r.readBytes = r.readBytes[:len(r.readBytes)+n]
+ default:
+ return err
+ }
+ return nil
+}
+
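+// BytesReadAt copies bytes from src starting at off into dst, with io.ReaderAt
+// semantics (io.EOF is returned when dst cannot be filled completely).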
+func BytesReadAt(src []byte, dst []byte, off int64) (n int, err error) {
+ return bytesReaderAt(src).ReadAt(dst, off)
+}
+
+type bytesReaderAt []byte
+
+func (bra bytesReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
+ idx := 0
+ for i := int(off); i < len(bra) && idx < len(p); i, idx = i+1, idx+1 {
+ p[idx] = bra[i]
+ }
+ if idx != len(p) {
+ return idx, io.EOF
+ }
+ return idx, nil
+}
+
+// LowerPart returns lower byte of record type
+func (r recordType) LowerPart() byte {
+ const fullByte = 0xFF
+ return byte(r & fullByte)
+}
+
+var errMismatchRecordType = errors.New("mismatch record type")
+
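+// record is a single PPT record: an 8-byte header (version/instance, type, length)
+// followed by its data.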
+type record struct {
+ header [headerSize]byte
+ recordData
+}
+
+// Type returns the recordType contained in the record's header
+func (r record) Type() recordType {
+ return recordType(binary.LittleEndian.Uint16(r.header[2:4]))
+}
+
+// Length returns data length contained in record header
+func (r record) Length() uint32 {
+ return binary.LittleEndian.Uint32(r.header[4:8])
+}
+
+// Data returns all data from record except header
+func (r record) Data() []byte {
+ return r.recordData
+}
+
+type recordData []byte
+
+// ReadAt copies bytes from record data at given offset into buffer p
+func (rd recordData) ReadAt(p []byte, off int64) (n int, err error) {
+ return BytesReadAt(rd, p, off)
+}
+
+// LongAt interprets 4 bytes of record data at given offset as uint32 value and returns it
+func (rd recordData) LongAt(offset int) uint32 {
+ return binary.LittleEndian.Uint32(rd[offset:])
+}
+
+// readRecord reads the header and data of a record. If wantedType is specified (not recordTypeUnspecified),
+// it also compares the read type with the wanted one and returns an error if they are not equal
+func readRecord(f io.ReaderAt, offset int64, wantedType recordType) (record, error) {
+ r, err := readRecordHeaderOnly(f, offset, wantedType)
+ if err != nil {
+ return record{}, err
+ }
+ r.recordData = make([]byte, r.Length())
+ _, err = f.ReadAt(r.recordData, offset+headerSize)
+ if err != nil {
+ return record{}, err
+ }
+ return r, nil
+}
+
+// readRecordHeaderOnly reads only the header of a record. If wantedType is specified (not recordTypeUnspecified),
+// it also compares the read type with the wanted one and returns an error if they are not equal
+func readRecordHeaderOnly(f io.ReaderAt, offset int64, wantedType recordType) (record, error) {
+ r := record{}
+ _, err := f.ReadAt(r.header[:], offset)
+ if err != nil {
+ return record{}, err
+ }
+ if wantedType != recordTypeUnspecified && r.Type() != wantedType {
+ return record{}, errMismatchRecordType
+ }
+ return r, nil
+}
diff --git a/godo/office/rtf.go b/godo/office/rtf.go
new file mode 100644
index 0000000..54b61ff
--- /dev/null
+++ b/godo/office/rtf.go
@@ -0,0 +1,362 @@
+// RTF text extraction for package office, adapted from the rtftxt package
+package office
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/EndFirstCorp/peekingReader"
+)
+
+// rtf2txt converts a .rtf document file to a string
+func rtf2txt(filename string) (string, error) {
+ content, err := os.ReadFile(filename)
+ if err != nil {
+ return "", err
+ }
+
+ return BytesToStr(content)
+}
+
+// BytesToStr converts a []byte representation of a .rtf document file to string
+func BytesToStr(data []byte) (string, error) {
+ reader := bytes.NewReader(data)
+ r, err := Text(reader)
+ if err != nil {
+ return "", err
+ }
+ s := r.String()
+ return s, nil
+}
+
+type stack struct {
+ top *element
+ size int
+}
+
+type element struct {
+ value string
+ next *element
+}
+
+func (s *stack) Len() int {
+ return s.size
+}
+
+func (s *stack) Push(value string) {
+ s.top = &element{value, s.top}
+ s.size++
+}
+
+func (s *stack) Peek() string {
+ if s.size == 0 {
+ return ""
+ }
+ return s.top.value
+}
+
+func (s *stack) Pop() string {
+ if s.size > 0 {
+ var v string
+ v, s.top = s.top.value, s.top.next
+ s.size--
+ return v
+ }
+ return ""
+}
+
+// Text is used to convert an io.Reader containing RTF data into
+// plain text
+func Text(r io.Reader) (*bytes.Buffer, error) {
+ pr := peekingReader.NewBufReader(r)
+
+ var text bytes.Buffer
+ var symbolStack stack
+ for b, err := pr.ReadByte(); err == nil; b, err = pr.ReadByte() {
+ switch b {
+ case '\\':
+ err := readControl(pr, &symbolStack, &text)
+ if err != nil {
+ return nil, err
+ }
+ case '{', '}':
+ case '\n', '\r': // noop
+ default:
+ text.WriteByte(b)
+ }
+ }
+ return &text, nil
+}
+
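+// readControl consumes one control word or control symbol after a backslash:
+// \* extended groups are skipped, \'hh escapes and known symbols are written out,
+// binary payloads are consumed, and for formatting controls the trailing parameter
+// text is passed to handleParams.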
+func readControl(r peekingReader.Reader, s *stack, text *bytes.Buffer) error {
+ control, num, err := tokenizeControl(r)
+ if err != nil {
+ return err
+ }
+ if control == "*" { // this is an extended control sequence
+ err = readUntilClosingBrace(r)
+ if err != nil {
+ return err
+ }
+ if last := s.Peek(); last != "" {
+ val, err := getParams(r) // last control was interrupted, so finish handling Params
+ handleParams(control, val, text)
+ return err
+ }
+ return nil
+ }
+ if isUnicode, u := getUnicode(control); isUnicode {
+ text.WriteString(u)
+ return nil
+ }
+ if control == "" {
+ p, err := r.Peek(1)
+ if err != nil {
+ return err
+ }
+ if p[0] == '\\' || p[0] == '{' || p[0] == '}' { // this is an escaped character
+ text.WriteByte(p[0])
+ r.ReadByte()
+ return nil
+ }
+ text.WriteByte('\n')
+ return nil
+ }
+ if control == "binN" {
+ return handleBinary(r, control, num)
+ }
+
+ if symbol, found := convertSymbol(control); found {
+ text.WriteString(symbol)
+ }
+
+ val, err := getParams(r)
+ if err != nil {
+ return err
+ }
+ handleParams(control, val, text)
+ s.Push(control)
+ return nil
+}
+
+func tokenizeControl(r peekingReader.Reader) (string, int, error) {
+ var buf bytes.Buffer
+ isHex := false
+ numStart := -1
+ for {
+ p, err := r.Peek(1)
+ if err != nil {
+ return "", -1, err
+ }
+ b := p[0]
+ switch {
+ case b == '*' && buf.Len() == 0:
+ r.ReadByte() // consume the '*'
+ return "*", -1, nil
+ case b == '\'' && buf.Len() == 0:
+ isHex = true
+ buf.WriteByte(b)
+ r.ReadByte() // consume valid character
+ // read 2 bytes for hex
+ for i := 0; i < 2; i++ {
+ b, err = r.ReadByte() // consume valid digit
+ if err != nil {
+ return "", -1, err
+ }
+ buf.WriteByte(b)
+ }
+ return buf.String(), -1, nil
+ case b >= '0' && b <= '9' || b == '-':
+ if numStart == -1 {
+ numStart = buf.Len()
+ } else if numStart == 0 {
+ return "", -1, fmt.Errorf("unexpected control sequence. Cannot begin with digit")
+ }
+ buf.WriteByte(b)
+ r.ReadByte() // consume valid digit
+ case b >= 'a' && b <= 'z' || b >= 'A' && b <= 'Z':
+ if numStart > 0 { // we've already seen alpha character(s) plus digit(s)
+ c, num := canonicalize(buf.String(), numStart)
+ return c, num, nil
+ }
+ buf.WriteByte(b)
+ r.ReadByte()
+ default:
+ if isHex {
+ return buf.String(), -1, nil
+ }
+ c, num := canonicalize(buf.String(), numStart)
+ return c, num, nil
+ }
+ }
+}
+
+func canonicalize(control string, numStart int) (string, int) {
+ if numStart == -1 || numStart >= len(control) {
+ return control, -1
+ }
+ num, err := strconv.Atoi(control[numStart:])
+ if err != nil {
+ return control, -1
+ }
+ return control[:numStart] + "N", num
+}
+
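+// getUnicode reports whether control is a \'hh hex escape and, if so, returns the
+// decoded character plus any trailing text; for example the control "'e9" yields "é".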
+func getUnicode(control string) (bool, string) {
+ if len(control) < 2 || control[0] != '\'' {
+ return false, ""
+ }
+
+ var buf bytes.Buffer
+ for i := 1; i < len(control); i++ {
+ b := control[i]
+ if b >= '0' && b <= '9' || b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F' {
+ buf.WriteByte(b)
+ } else {
+ break
+ }
+ }
+ after := control[buf.Len()+1:]
+ num, _ := strconv.ParseInt(buf.String(), 16, 16)
+ return true, fmt.Sprintf("%c%s", num, after)
+}
+
+func getParams(r peekingReader.Reader) (string, error) {
+ data, err := peekingReader.ReadUntilAny(r, []byte{'\\', '{', '}', '\n', '\r', ';'})
+ if err != nil {
+ return "", err
+ }
+ p, err := r.Peek(1)
+ if err != nil {
+ return "", err
+ }
+ if p[0] == ';' { // skip next if it is a semicolon
+ r.ReadByte()
+ }
+
+ return string(data), nil
+}
+
+func handleBinary(r peekingReader.Reader, control string, size int) error {
+ if control != "binN" { // wrong control type
+ return nil
+ }
+
+ _, err := r.ReadBytes(size)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func readUntilClosingBrace(r peekingReader.Reader) error {
+ count := 1
+ var b byte
+ var err error
+ for b, err = r.ReadByte(); err == nil; b, err = r.ReadByte() {
+ switch b {
+ case '{':
+ count++
+ case '}':
+ count--
+ }
+ if count == 0 {
+ return nil
+ }
+ }
+ return err
+}
+
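+// handleParams writes the parameter text that follows a control word when that control
+// is one of the character, paragraph, section, table, tab or special-character
+// formatting controls whose trailing text is part of the document content.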
+func handleParams(control, param string, text io.StringWriter) {
+ param = strings.TrimPrefix(param, " ")
+ if param == "" {
+ return
+ }
+ switch control {
+ case "fldrslt":
+ text.WriteString(param)
+ case "acccircle", "acccomma", "accdot", "accnone", "accunderdot",
+ "animtextN", "b", "caps", "cbN", "cchsN ", "cfN", "charscalexN",
+ "csN", "dnN", "embo", "expndN", "expndtwN ", "fittextN", "fN",
+ "fsN", "i", "kerningN ", "langfeN", "langfenpN", "langN", "langnpN",
+ "ltrch", "noproof", "nosupersub ", "outl", "plain", "rtlch", "scaps",
+ "shad", "strike", "sub ", "super ", "ul", "ulcN", "uld", "uldash",
+ "uldashd", "uldashdd", "uldb", "ulhwave", "ulldash", "ulnone", "ulth",
+ "ulthd", "ulthdash", "ulthdashd", "ulthdashdd", "ulthldash", "ululdbwave", "ulw", "ulwave", "upN", "v", "webhidden":
+ text.WriteString(param)
+
+ // Paragraph Formatting Properties
+ case "aspalpha", "aspnum", "collapsed", "contextualspace",
+ "cufiN", "culiN", "curiN", "faauto", "facenter",
+ "fafixed", "fahang", "faroman", "favar", "fiN", "hyphpar ",
+ "indmirror", "intbl", "itapN", "keep", "keepn", "levelN", "liN",
+ "linN", "lisaN", "lisbN", "ltrpar", "nocwrap", "noline", "nooverflow",
+ "nosnaplinegrid", "nowidctlpar ", "nowwrap", "outlinelevelN ", "pagebb",
+ "pard", "prauthN", "prdateN", "qc", "qd", "qj", "qkN", "ql", "qr", "qt",
+ "riN", "rinN", "rtlpar", "saautoN", "saN", "sbautoN", "sbN", "sbys",
+ "slmultN", "slN", "sN", "spv", "subdocumentN ", "tscbandhorzeven",
+ "tscbandhorzodd", "tscbandverteven", "tscbandvertodd", "tscfirstcol",
+ "tscfirstrow", "tsclastcol", "tsclastrow", "tscnecell", "tscnwcell",
+ "tscsecell", "tscswcell", "txbxtwalways", "txbxtwfirst", "txbxtwfirstlast",
+ "txbxtwlast", "txbxtwno", "widctlpar", "ytsN":
+ text.WriteString(param)
+
+ // Section Formatting Properties
+ case "adjustright", "binfsxnN", "binsxnN", "colnoN ", "colsN", "colsrN ", "colsxN", "colwN ", "dsN", "endnhere", "footeryN", "guttersxnN", "headeryN", "horzsect", "linebetcol", "linecont", "linemodN", "lineppage", "linerestart", "linestartsN", "linexN", "lndscpsxn", "ltrsect", "margbsxnN", "marglsxnN", "margmirsxn", "margrsxnN", "margtsxnN", "pghsxnN", "pgnbidia", "pgnbidib", "pgnchosung", "pgncnum", "pgncont", "pgndbnum", "pgndbnumd", "pgndbnumk", "pgndbnumt", "pgndec", "pgndecd", "pgnganada", "pgngbnum", "pgngbnumd", "pgngbnumk", "pgngbnuml", "pgnhindia", "pgnhindib", "pgnhindic", "pgnhindid", "pgnhnN ", "pgnhnsc ", "pgnhnsh ", "pgnhnsm ", "pgnhnsn ", "pgnhnsp ", "pgnid", "pgnlcltr", "pgnlcrm", "pgnrestart", "pgnstartsN", "pgnthaia", "pgnthaib", "pgnthaic", "pgnucltr", "pgnucrm", "pgnvieta", "pgnxN", "pgnyN", "pgnzodiac", "pgnzodiacd", "pgnzodiacl", "pgwsxnN", "pnseclvlN", "rtlsect", "saftnnalc", "saftnnar", "saftnnauc", "saftnnchi", "saftnnchosung", "saftnncnum", "saftnndbar", "saftnndbnum", "saftnndbnumd", "saftnndbnumk", "saftnndbnumt", "saftnnganada", "saftnngbnum", "saftnngbnumd", "saftnngbnumk", "saftnngbnuml", "saftnnrlc", "saftnnruc", "saftnnzodiac", "saftnnzodiacd", "saftnnzodiacl", "saftnrestart", "saftnrstcont", "saftnstartN", "sbkcol", "sbkeven", "sbknone", "sbkodd", "sbkpage", "sectd", "sectdefaultcl", "sectexpandN", "sectlinegridN", "sectspecifycl", "sectspecifygenN", "sectspecifyl", "sectunlocked", "sftnbj", "sftnnalc", "sftnnar", "sftnnauc", "sftnnchi", "sftnnchosung", "sftnncnum", "sftnndbar", "sftnndbnum", "sftnndbnumd", "sftnndbnumk", "sftnndbnumt", "sftnnganada", "sftnngbnum", "sftnngbnumd", "sftnngbnumk", "sftnngbnuml", "sftnnrlc", "sftnnruc", "sftnnzodiac", "sftnnzodiacd", "sftnnzodiacl", "sftnrestart", "sftnrstcont", "sftnrstpg", "sftnstartN", "sftntj", "srauthN", "srdateN", "titlepg", "vertal", "vertalb", "vertalc", "vertalj", "vertalt", "vertsect":
+ text.WriteString(param)
+
+ // Section Text
+ case "stextflowN":
+ text.WriteString(param)
+
+ // Special Characters
+ case "-", ":", "_", "{", "|", "}", "~", "bullet", "chatn", "chdate", "chdpa", "chdpl", "chftn", "chftnsep", "chftnsepc", "chpgn", "chtime", "column", "emdash", "emspace ",
+ "endash", "enspace ", "lbrN", "ldblquote", "line", "lquote", "ltrmark", "page", "par", "qmspace", "rdblquote", "row", "rquote", "rtlmark", "sect", "sectnum", "softcol ", "softlheightN ", "softline ", "softpage ", "tab", "zwbo", "zwj", "zwnbo", "zwnj":
+ text.WriteString(param)
+
+ // Table Definitions
+ case "cell", "cellxN", "clbgbdiag", "clbgcross", "clbgdcross", "clbgdkbdiag", "clbgdkcross", "clbgdkdcross", "clbgdkfdiag", "clbgdkhor", "clbgdkvert", "clbgfdiag", "clbghoriz", "clbgvert", "clbrdrb", "clbrdrl", "clbrdrr", "clbrdrt", "clcbpatN", "clcbpatrawN", "clcfpatN", "clcfpatrawN", "cldel2007", "cldelauthN", "cldeldttmN", "cldgll", "cldglu", "clFitText", "clftsWidthN", "clhidemark", "clins", "clinsauthN", "clinsdttmN", "clmgf", "clmrg", "clmrgd", "clmrgdauthN", "clmrgddttmN", "clmrgdr", "clNoWrap", "clpadbN", "clpadfbN", "clpadflN", "clpadfrN", "clpadftN", "clpadlN", "clpadrN", "clpadtN", "clshdngN", "clshdngrawN", "clshdrawnil", "clspbN", "clspfbN", "clspflN", "clspfrN", "clspftN", "clsplit", "clsplitr", "clsplN", "clsprN", "clsptN", "cltxbtlr", "cltxlrtb", "cltxlrtbv", "cltxtbrl", "cltxtbrlv", "clvertalb", "clvertalc", "clvertalt", "clvmgf", "clvmrg", "clwWidthN", "irowbandN", "irowN", "lastrow", "ltrrow", "nestcell", "nestrow", "nesttableprops", "nonesttables", "rawclbgbdiag", "rawclbgcross", "rawclbgdcross", "rawclbgdkbdiag", "rawclbgdkcross", "rawclbgdkdcross", "rawclbgdkfdiag", "rawclbgdkhor", "rawclbgdkvert", "rawclbgfdiag", "rawclbghoriz", "rawclbgvert", "rtlrow", "tabsnoovrlp", "taprtl", "tblindN", "tblindtypeN", "tbllkbestfit", "tbllkborder", "tbllkcolor", "tbllkfont", "tbllkhdrcols", "tbllkhdrrows", "tbllklastcol", "tbllklastrow", "tbllknocolband", "tbllknorowband", "tbllkshading", "tcelld", "tdfrmtxtBottomN", "tdfrmtxtLeftN", "tdfrmtxtRightN", "tdfrmtxtTopN", "tphcol", "tphmrg", "tphpg", "tposnegxN", "tposnegyN", "tposxc", "tposxi", "tposxl", "tposxN", "tposxo", "tposxr", "tposyb", "tposyc", "tposyil", "tposyin", "tposyN", "tposyout", "tposyt", "tpvmrg", "tpvpara", "tpvpg", "trauthN", "trautofitN", "trbgbdiag", "trbgcross", "trbgdcross", "trbgdkbdiag", "trbgdkcross", "trbgdkdcross", "trbgdkfdiag", "trbgdkhor", "trbgdkvert", "trbgfdiag", "trbghoriz", "trbgvert", "trbrdrb ", "trbrdrh ", "trbrdrl ", "trbrdrr ", "trbrdrt ", "trbrdrv ", "trcbpatN", "trcfpatN", "trdateN", "trftsWidthAN", "trftsWidthBN", "trftsWidthN", "trgaphN", "trhdr ", "trkeep ", "trkeepfollow", "trleftN", "trowd", "trpaddbN", "trpaddfbN", "trpaddflN", "trpaddfrN", "trpaddftN", "trpaddlN", "trpaddrN", "trpaddtN", "trpadobN", "trpadofbN", "trpadoflN", "trpadofrN", "trpadoftN", "trpadolN", "trpadorN", "trpadotN", "trpatN", "trqc", "trql", "trqr", "trrhN", "trshdngN", "trspdbN", "trspdfbN", "trspdflN", "trspdfrN", "trspdftN", "trspdlN", "trspdrN", "trspdtN", "trspobN", "trspofbN", "trspoflN", "trspofrN", "trspoftN", "trspolN", "trsporN", "trspotN", "trwWidthAN", "trwWidthBN", "trwWidthN":
+ text.WriteString(param)
+
+ // Table of Contents Entries
+ case "tc", "tcfN", "tclN", "tcn ":
+ text.WriteString(param)
+
+ // Tabs
+ case "tbN", "tldot", "tleq", "tlhyph", "tlmdot", "tlth", "tlul", "tqc", "tqdec", "tqr", "txN":
+ text.WriteString(param)
+ }
+}
+
+func convertSymbol(symbol string) (string, bool) {
+ switch symbol {
+ case "bullet":
+ return "*", true
+ case "chdate", "chdpa", "chdpl":
+ return time.Now().Format("2005-01-02"), true
+ case "chtime":
+ return time.Now().Format("4:56 pm"), true
+ case "emdash", "endash":
+ return "-", true
+ case "lquote", "rquote":
+ return "'", true
+ case "ldblquote", "rdblquote":
+ return "\"", true
+ case "line", "lbrN":
+ return "\n", true
+ case "cell", "column", "emspace", "enspace", "qmspace", "nestcell", "nestrow", "page", "par", "row", "sect", "tab":
+ return " ", true
+ case "|", "~", "-", "_", ":":
+ return symbol, true
+ case "chatn", "chftn", "chftnsep", "chftnsepc", "chpgn", "sectnum", "ltrmark", "rtlmark", "zwbo", "zwj", "zwnbo", "zwnj", "softcol",
+ "softline", "softpage":
+ return "", true
+ default:
+ return "", false
+ }
+}
diff --git a/godo/office/txt.go b/godo/office/txt.go
new file mode 100644
index 0000000..c1734da
--- /dev/null
+++ b/godo/office/txt.go
@@ -0,0 +1,41 @@
+package office
+
+import (
+ "bufio"
+ "os"
+ "regexp"
+ "strings"
+)
+
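+// text2txt reads a plain-text file and normalizes it into a single line of
+// space-separated content.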
+func text2txt(filename string) (string, error) {
+ file, err := os.Open(filename)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ var lines []string
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ line := scanner.Text()
+ // strip carriage returns within the line
+ line = strings.ReplaceAll(line, "\r", "")
+ lines = append(lines, line)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+
+ // join all lines into one string
+ content := strings.Join(lines, " ")
+
+ // collapse runs of whitespace into single spaces
+ re := regexp.MustCompile(`\s+`)
+ content = re.ReplaceAllString(content, " ")
+
+ // trim leading and trailing whitespace
+ content = strings.TrimSpace(content)
+
+ return content, nil
+}
diff --git a/godo/office/types.go b/godo/office/types.go
new file mode 100644
index 0000000..9bfc3f7
--- /dev/null
+++ b/godo/office/types.go
@@ -0,0 +1,46 @@
+package office
+
+import (
+ "regexp"
+ "time"
+)
+
+const maxBytes = 1024 << 20 // 1GB
+const ISO string = "2006-01-02T15:04:05"
+
+var TAG_RE = regexp.MustCompile(`(<[^>]*>)+`)
+var PARA_RE = regexp.MustCompile(`([a-z]:p>)+`)
+var DEBUG bool = false
+
+type Document struct {
+ path string
+ RePath string `json:"path"`
+ Filename string `json:"filename"`
+ Title string `json:"title"`
+ Subject string `json:"subject"`
+ Creator string `json:"creator"`
+ Keywords string `json:"keywords"`
+ Description string `json:"description"`
+ Lastmodifiedby string `json:"lastModifiedBy"`
+ Revision string `json:"revision"`
+ Category string `json:"category"`
+ Content string `json:"content"`
+ Modifytime time.Time `json:"modified"`
+ Createtime time.Time `json:"created"`
+ Accesstime time.Time `json:"accessed"`
+ Size int `json:"size"`
+}
+
+type DocReader func(string) (string, error)
+type XMLContent struct {
+ Title string `xml:"title"`
+ Subject string `xml:"subject"`
+ Creator string `xml:"creator"`
+ Keywords string `xml:"keywords"`
+ Description string `xml:"description"`
+ LastModifiedBy string `xml:"lastModifiedBy"`
+ Revision string `xml:"revision"`
+ Created string `xml:"created"`
+ Modified string `xml:"modified"`
+ Category string `xml:"category"`
+}
diff --git a/godo/office/windows.go b/godo/office/windows.go
new file mode 100644
index 0000000..8e1e685
--- /dev/null
+++ b/godo/office/windows.go
@@ -0,0 +1,27 @@
+//go:build windows
+// +build windows
+
+package office
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
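+// getFileInfoData fills the Document's name, size and timestamps from the file
+// system, using the Windows-specific Win32FileAttributeData for the times.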
+func getFileInfoData(data *Document) (bool, error) {
+ fileinfo, err := os.Stat(data.path)
+ if err != nil {
+ return false, err
+ }
+ data.Filename = fileinfo.Name()
+ data.Title = data.Filename
+ data.Size = int(fileinfo.Size())
+
+ stat := fileinfo.Sys().(*syscall.Win32FileAttributeData)
+ data.Createtime = time.Unix(0, stat.CreationTime.Nanoseconds())
+ data.Modifytime = time.Unix(0, stat.LastWriteTime.Nanoseconds())
+ data.Accesstime = time.Unix(0, stat.LastAccessTime.Nanoseconds())
+
+ return true, nil
+}
diff --git a/godo/office/xls.go b/godo/office/xls.go
new file mode 100644
index 0000000..22aa6f0
--- /dev/null
+++ b/godo/office/xls.go
@@ -0,0 +1,125 @@
+package office
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+
+ "godo/office/xls"
+)
+
+func XLS2Text(reader io.ReadSeeker) (string, error) {
+
+ xlFile, err := xls.OpenReader(reader, "utf-8")
+ if err != nil || xlFile == nil {
+ return "", err
+ }
+
+ extracted_text := ""
+ for n := 0; n < xlFile.NumSheets(); n++ {
+ if sheet1 := xlFile.GetSheet(n); sheet1 != nil {
+ if extracted_text != "" {
+ extracted_text = fmt.Sprintf("%s\n%s", extracted_text, xlGenerateSheetTitle(sheet1.Name, n, int(sheet1.MaxRow)))
+ } else {
+ extracted_text = fmt.Sprintf("%s%s", extracted_text, xlGenerateSheetTitle(sheet1.Name, n, int(sheet1.MaxRow)))
+ }
+
+ for m := 0; m <= int(sheet1.MaxRow); m++ {
+ row1 := sheet1.Row(m)
+ if row1 == nil {
+ continue
+ }
+
+ rowText := ""
+
+ // go through all columns
+ for c := row1.FirstCol(); c < row1.LastCol(); c++ {
+ if text := row1.Col(c); text != "" {
+ text = cleanCell(text)
+
+ if c > row1.FirstCol() {
+ rowText += ", "
+ }
+ rowText += text
+ }
+ }
+ if extracted_text != "" {
+ extracted_text = fmt.Sprintf("%s\n%s", extracted_text, rowText)
+ } else {
+ extracted_text = fmt.Sprintf("%s%s", extracted_text, rowText)
+ }
+ }
+ }
+ }
+
+ return extracted_text, nil
+}
+
+// cleanCell returns a cleaned cell text without new-lines
+func cleanCell(text string) string {
+ text = strings.ReplaceAll(text, "\n", " ")
+ text = strings.ReplaceAll(text, "\r", "")
+ text = strings.TrimSpace(text)
+
+ return text
+}
+
+func xlGenerateSheetTitle(name string, number, rows int) (title string) {
+ if number > 0 {
+ title += "\n"
+ }
+
+ title += fmt.Sprintf("Sheet \"%s\" (%d rows):\n", name, rows)
+
+ return title
+}
+
+// func writeOutput(writer io.Writer, output []byte, alreadyWritten *int64, size *int64) (err error) {
+
+// if int64(len(output)) > *size {
+// output = output[:*size]
+// }
+
+// *size -= int64(len(output))
+
+// writtenOut, err := writer.Write(output)
+// *alreadyWritten += int64(writtenOut)
+
+// return err
+// }
+
+// IsFileXLS checks whether the data indicates an XLS file
+// XLS has a signature of D0 CF 11 E0 A1 B1 1A E1
+func IsFileXLS(data []byte) bool {
+ return bytes.HasPrefix(data, []byte{0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1})
+}
+
+// XLS2Cells converts an XLS file to individual cells
+func XLS2Cells(reader io.ReadSeeker) (cells []string, err error) {
+
+ xlFile, err := xls.OpenReader(reader, "utf-8")
+ if err != nil || xlFile == nil {
+ return nil, err
+ }
+
+ for n := 0; n < xlFile.NumSheets(); n++ {
+ if sheet1 := xlFile.GetSheet(n); sheet1 != nil {
+ for m := 0; m <= int(sheet1.MaxRow); m++ {
+ row1 := sheet1.Row(m)
+ if row1 == nil {
+ continue
+ }
+
+ for c := row1.FirstCol(); c < row1.LastCol(); c++ {
+ if text := row1.Col(c); text != "" {
+ text = cleanCell(text)
+ cells = append(cells, text)
+ }
+ }
+ }
+ }
+ }
+
+ return
+}
diff --git a/godo/office/xls/README.md b/godo/office/xls/README.md
new file mode 100644
index 0000000..b7a2832
--- /dev/null
+++ b/godo/office/xls/README.md
@@ -0,0 +1,15 @@
+# xls
+
+[GoDoc](https://godoc.org/github.com/extrame/xls)
+
+Pure Golang xls library written by [Rongshu Tech (Chinese)](http://www.rongshu.tech), based on libxls.
+
+Thanks for contributions from Tamás Gulácsi @tgulacsi, @flyin9.
+
+# Basic Usage
+
+* Use the **Open** function to open a file by name
+* Use the **OpenWithCloser** function to open a file and also get a closer for closing the file
+* Use the **OpenReader** function to open an xls from a reader; closing the underlying file is your own responsibility
+
+* Follow the examples in GoDoc, or the minimal sketch below
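+
+# Example
+
+A minimal sketch of reading every cell (assumes the vendored import path `godo/office/xls` and a local `t1.xls` file; error handling kept short):
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "godo/office/xls"
+)
+
+func main() {
+    // Open the workbook and make sure the underlying file gets closed.
+    wb, closer, err := xls.OpenWithCloser("t1.xls", "utf-8")
+    if err != nil {
+        panic(err)
+    }
+    defer closer.Close()
+
+    // Walk the first sheet row by row and print each cell.
+    if sheet := wb.GetSheet(0); sheet != nil {
+        for i := 0; i <= int(sheet.MaxRow); i++ {
+            row := sheet.Row(i)
+            if row == nil {
+                continue
+            }
+            for c := row.FirstCol(); c < row.LastCol(); c++ {
+                fmt.Println(i, c, row.Col(c))
+            }
+        }
+    }
+}
+```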
diff --git a/godo/office/xls/bof.go b/godo/office/xls/bof.go
new file mode 100644
index 0000000..ecb1f1e
--- /dev/null
+++ b/godo/office/xls/bof.go
@@ -0,0 +1,30 @@
+package xls
+
+import (
+ "encoding/binary"
+ "io"
+ "unicode/utf16"
+)
+
+// bof is the basic information unit in an xls file
+type bof struct {
+ Id uint16
+ Size uint16
+}
+
+// utf16String reads a UTF-16 string of the given length from the reader
+func (b *bof) utf16String(buf io.ReadSeeker, count uint32) string {
+ var bts = make([]uint16, count)
+ binary.Read(buf, binary.LittleEndian, &bts)
+ runes := utf16.Decode(bts[:len(bts)-1])
+ return string(runes)
+}
+
+type biffHeader struct {
+ Ver uint16
+ Type uint16
+ Id_make uint16
+ Year uint16
+ Flags uint32
+ Min_ver uint32
+}
diff --git a/godo/office/xls/cell_range.go b/godo/office/xls/cell_range.go
new file mode 100644
index 0000000..2dde04e
--- /dev/null
+++ b/godo/office/xls/cell_range.go
@@ -0,0 +1,63 @@
+package xls
+
+import (
+ "fmt"
+)
+
+// Ranger is a range spanning multiple rows
+type Ranger interface {
+ FirstRow() uint16
+ LastRow() uint16
+}
+
+// CellRange is a range of cells spanning multiple rows and columns
+type CellRange struct {
+ FirstRowB uint16
+ LastRowB uint16
+ FristColB uint16
+ LastColB uint16
+}
+
+func (c *CellRange) FirstRow() uint16 {
+ return c.FirstRowB
+}
+
+func (c *CellRange) LastRow() uint16 {
+ return c.LastRowB
+}
+
+func (c *CellRange) FirstCol() uint16 {
+ return c.FristColB
+}
+
+func (c *CellRange) LastCol() uint16 {
+ return c.LastColB
+}
+
+// HyperLink holds the content of a hyperlink cell
+type HyperLink struct {
+ CellRange
+ Description string
+ TextMark string
+ TargetFrame string
+ Url string
+ ShortedFilePath string
+ ExtendedFilePath string
+ IsUrl bool
+}
+
+// String returns the hyperlink display strings; use the exported Url field to get the original URL
+func (h *HyperLink) String(wb *WorkBook) []string {
+ res := make([]string, h.LastColB-h.FristColB+1)
+ var str string
+ if h.IsUrl {
+ str = fmt.Sprintf("%s(%s)", h.Description, h.Url)
+ } else {
+ str = h.ExtendedFilePath
+ }
+
+ for i := uint16(0); i < h.LastColB-h.FristColB+1; i++ {
+ res[i] = str
+ }
+ return res
+}
diff --git a/godo/office/xls/col.go b/godo/office/xls/col.go
new file mode 100644
index 0000000..a7c4329
--- /dev/null
+++ b/godo/office/xls/col.go
@@ -0,0 +1,238 @@
+package xls
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "time"
+)
+
+// content type
+type contentHandler interface {
+ String(*WorkBook) []string
+ FirstCol() uint16
+ LastCol() uint16
+}
+
+type Col struct {
+ RowB uint16
+ FirstColB uint16
+}
+
+type Coler interface {
+ Row() uint16
+}
+
+func (c *Col) Row() uint16 {
+ return c.RowB
+}
+
+func (c *Col) FirstCol() uint16 {
+ return c.FirstColB
+}
+
+func (c *Col) LastCol() uint16 {
+ return c.FirstColB
+}
+
+func (c *Col) String(wb *WorkBook) []string {
+ return []string{"default"}
+}
+
+type XfRk struct {
+ Index uint16
+ Rk RK
+}
+
+func (xf *XfRk) String(wb *WorkBook) string {
+ idx := int(xf.Index)
+ if len(wb.Xfs) > idx {
+ fNo := wb.Xfs[idx].formatNo()
+ if fNo >= 164 { // user defined format
+ if formatter := wb.Formats[fNo]; formatter != nil {
+ formatterLower := strings.ToLower(formatter.str)
+ if formatterLower == "general" ||
+ strings.Contains(formatter.str, "#") ||
+ strings.Contains(formatter.str, ".00") ||
+ strings.Contains(formatterLower, "m/y") ||
+ strings.Contains(formatterLower, "d/y") ||
+ strings.Contains(formatterLower, "m.y") ||
+ strings.Contains(formatterLower, "d.y") ||
+ strings.Contains(formatterLower, "h:") ||
+ strings.Contains(formatterLower, "д.г") {
+ //If format contains # or .00 then this is a number
+ return xf.Rk.String()
+ } else {
+ i, f, isFloat := xf.Rk.number()
+ if !isFloat {
+ f = float64(i)
+ }
+ t := timeFromExcelTime(f, wb.dateMode == 1)
+ return t.Format(formatter.str)
+ }
+ }
+ // see http://www.openoffice.org/sc/excelfileformat.pdf Page #174
+ } else if 14 <= fNo && fNo <= 17 || fNo == 22 || 27 <= fNo && fNo <= 36 || 50 <= fNo && fNo <= 58 { // jp. date format
+ i, f, isFloat := xf.Rk.number()
+ if !isFloat {
+ f = float64(i)
+ }
+ t := timeFromExcelTime(f, wb.dateMode == 1)
+ return t.Format(time.RFC3339) //TODO it should be international
+ }
+ }
+ return xf.Rk.String()
+}
+
+type RK uint32
+
+func (rk RK) number() (intNum int64, floatNum float64, isFloat bool) {
+ multiplied := rk & 1
+ isInt := rk & 2
+ val := int32(rk) >> 2
+ if isInt == 0 {
+ isFloat = true
+ floatNum = math.Float64frombits(uint64(val) << 34)
+ if multiplied != 0 {
+ floatNum = floatNum / 100
+ }
+ return
+ }
+ if multiplied != 0 {
+ isFloat = true
+ floatNum = float64(val) / 100
+ return
+ }
+ return int64(val), 0, false
+}
+
+func (rk RK) String() string {
+ i, f, isFloat := rk.number()
+ if isFloat {
+ return strconv.FormatFloat(f, 'f', -1, 64)
+ }
+ return strconv.FormatInt(i, 10)
+}
+
+var ErrIsInt = fmt.Errorf("is int")
+
+func (rk RK) Float() (float64, error) {
+ _, f, isFloat := rk.number()
+ if !isFloat {
+ return 0, ErrIsInt
+ }
+ return f, nil
+}
+
+type MulrkCol struct {
+ Col
+ Xfrks []XfRk
+ LastColB uint16
+}
+
+func (c *MulrkCol) LastCol() uint16 {
+ return c.LastColB
+}
+
+func (c *MulrkCol) String(wb *WorkBook) []string {
+ var res = make([]string, len(c.Xfrks))
+ for i := 0; i < len(c.Xfrks); i++ {
+ xfrk := c.Xfrks[i]
+ res[i] = xfrk.String(wb)
+ }
+ return res
+}
+
+type MulBlankCol struct {
+ Col
+ Xfs []uint16
+ LastColB uint16
+}
+
+func (c *MulBlankCol) LastCol() uint16 {
+ return c.LastColB
+}
+
+func (c *MulBlankCol) String(wb *WorkBook) []string {
+ return make([]string, len(c.Xfs))
+}
+
+type NumberCol struct {
+ Col
+ Index uint16
+ Float float64
+}
+
+func (c *NumberCol) String(wb *WorkBook) []string {
+ if fNo := wb.Xfs[c.Index].formatNo(); fNo != 0 {
+ t := timeFromExcelTime(c.Float, wb.dateMode == 1)
+ return []string{t.Format(wb.Formats[fNo].str)}
+ }
+ return []string{strconv.FormatFloat(c.Float, 'f', -1, 64)}
+}
+
+type FormulaStringCol struct {
+ Col
+ RenderedValue string
+}
+
+func (c *FormulaStringCol) String(wb *WorkBook) []string {
+ return []string{c.RenderedValue}
+}
+
+//str, err = wb.get_string(buf_item, size)
+//wb.sst[offset_pre] = wb.sst[offset_pre] + str
+
+type FormulaCol struct {
+ Header struct {
+ Col
+ IndexXf uint16
+ Result [8]byte
+ Flags uint16
+ _ uint32
+ }
+ Bts []byte
+}
+
+func (c *FormulaCol) String(wb *WorkBook) []string {
+ return []string{"FormulaCol"}
+}
+
+type RkCol struct {
+ Col
+ Xfrk XfRk
+}
+
+func (c *RkCol) String(wb *WorkBook) []string {
+ return []string{c.Xfrk.String(wb)}
+}
+
+type LabelsstCol struct {
+ Col
+ Xf uint16
+ Sst uint32
+}
+
+func (c *LabelsstCol) String(wb *WorkBook) []string {
+ return []string{wb.sst[int(c.Sst)]}
+}
+
+type labelCol struct {
+ BlankCol
+ Str string
+}
+
+func (c *labelCol) String(wb *WorkBook) []string {
+ return []string{c.Str}
+}
+
+type BlankCol struct {
+ Col
+ Xf uint16
+}
+
+func (c *BlankCol) String(wb *WorkBook) []string {
+ return []string{""}
+}
diff --git a/godo/office/xls/date.go b/godo/office/xls/date.go
new file mode 100644
index 0000000..b7d2d04
--- /dev/null
+++ b/godo/office/xls/date.go
@@ -0,0 +1,98 @@
+package xls
+
+import (
+ "math"
+ "time"
+)
+
+const MJD_0 float64 = 2400000.5
+const MJD_JD2000 float64 = 51544.5
+
+func shiftJulianToNoon(julianDays, julianFraction float64) (float64, float64) {
+ switch {
+ case -0.5 < julianFraction && julianFraction < 0.5:
+ julianFraction += 0.5
+ case julianFraction >= 0.5:
+ julianDays += 1
+ julianFraction -= 0.5
+ case julianFraction <= -0.5:
+ julianDays -= 1
+ julianFraction += 1.5
+ }
+ return julianDays, julianFraction
+}
+
+// fractionOfADay returns the integer values for hours, minutes, seconds and
+// nanoseconds that comprise a given fraction of a day.
+func fractionOfADay(fraction float64) (hours, minutes, seconds, nanoseconds int) {
+ f := 5184000000000000 * fraction
+ nanoseconds = int(math.Mod(f, 1000000000))
+ f = f / 1000000000
+ seconds = int(math.Mod(f, 60))
+ f = f / 3600
+ minutes = int(math.Mod(f, 60))
+ f = f / 60
+ hours = int(f)
+ return hours, minutes, seconds, nanoseconds
+}
+
+func julianDateToGregorianTime(part1, part2 float64) time.Time {
+ part1I, part1F := math.Modf(part1)
+ part2I, part2F := math.Modf(part2)
+ julianDays := part1I + part2I
+ julianFraction := part1F + part2F
+ julianDays, julianFraction = shiftJulianToNoon(julianDays, julianFraction)
+ day, month, year := doTheFliegelAndVanFlandernAlgorithm(int(julianDays))
+ hours, minutes, seconds, nanoseconds := fractionOfADay(julianFraction)
+ return time.Date(year, time.Month(month), day, hours, minutes, seconds, nanoseconds, time.UTC)
+}
+
+// By this point generations of programmers have repeated the
+// algorithm sent to the editor of "Communications of the ACM" in 1968
+// (published in CACM, volume 11, number 10, October 1968, p.657).
+// None of those programmers seems to have found it necessary to
+// explain the constants or variable names set out by Henry F. Fliegel
+// and Thomas C. Van Flandern. Maybe one day I'll buy that journal and
+// expand an explanation here - that day is not today.
+func doTheFliegelAndVanFlandernAlgorithm(jd int) (day, month, year int) {
+ l := jd + 68569
+ n := (4 * l) / 146097
+ l = l - (146097*n+3)/4
+ i := (4000 * (l + 1)) / 1461001
+ l = l - (1461*i)/4 + 31
+ j := (80 * l) / 2447
+ d := l - (2447*j)/80
+ l = j / 11
+ m := j + 2 - (12 * l)
+ y := 100*(n-49) + i + l
+ return d, m, y
+}
+
+// Convert an excelTime representation (stored as a floating point number) to a time.Time.
+func timeFromExcelTime(excelTime float64, date1904 bool) time.Time {
+ var date time.Time
+ var intPart int64 = int64(excelTime)
+ // Excel uses Julian dates prior to March 1st 1900, and
+ // Gregorian thereafter.
+ if intPart <= 61 {
+ const OFFSET1900 = 15018.0
+ const OFFSET1904 = 16480.0
+ var date time.Time
+ if date1904 {
+ date = julianDateToGregorianTime(MJD_0+OFFSET1904, excelTime)
+ } else {
+ date = julianDateToGregorianTime(MJD_0+OFFSET1900, excelTime)
+ }
+ return date
+ }
+ var floatPart float64 = excelTime - float64(intPart)
+ var dayNanoSeconds float64 = 24 * 60 * 60 * 1000 * 1000 * 1000
+ if date1904 {
+ date = time.Date(1904, 1, 1, 0, 0, 0, 0, time.UTC)
+ } else {
+ date = time.Date(1899, 12, 30, 0, 0, 0, 0, time.UTC)
+ }
+ durationDays := time.Duration(intPart) * time.Hour * 24
+ durationPart := time.Duration(dayNanoSeconds * floatPart)
+ return date.Add(durationDays).Add(durationPart)
+}
diff --git a/godo/office/xls/doc.go b/godo/office/xls/doc.go
new file mode 100644
index 0000000..96aef6b
--- /dev/null
+++ b/godo/office/xls/doc.go
@@ -0,0 +1,4 @@
+// Package xls parses Microsoft Excel 97-2004 files (".xls" suffix, NOT ".xlsx").
+//
+// There are examples in the godoc; please follow them.
+package xls
diff --git a/godo/office/xls/font.go b/godo/office/xls/font.go
new file mode 100644
index 0000000..a85df50
--- /dev/null
+++ b/godo/office/xls/font.go
@@ -0,0 +1,19 @@
+package xls
+
+type FontInfo struct {
+ Height uint16
+ Flag uint16
+ Color uint16
+ Bold uint16
+ Escapement uint16
+ Underline byte
+ Family byte
+ Charset byte
+ Notused byte
+ NameB byte
+}
+
+type Font struct {
+ Info *FontInfo
+ Name string
+}
diff --git a/godo/office/xls/format.go b/godo/office/xls/format.go
new file mode 100644
index 0000000..35b576c
--- /dev/null
+++ b/godo/office/xls/format.go
@@ -0,0 +1,9 @@
+package xls
+
+type Format struct {
+ Head struct {
+ Index uint16
+ Size uint16
+ }
+ str string
+}
diff --git a/godo/office/xls/row.go b/godo/office/xls/row.go
new file mode 100644
index 0000000..0908172
--- /dev/null
+++ b/godo/office/xls/row.go
@@ -0,0 +1,57 @@
+package xls
+
+type rowInfo struct {
+ Index uint16
+ Fcell uint16
+ Lcell uint16
+ Height uint16
+ Notused uint16
+ Notused2 uint16
+ Flags uint32
+}
+
+// Row holds the data of one row
+type Row struct {
+ wb *WorkBook
+ info *rowInfo
+ cols map[uint16]contentHandler
+}
+
+// Col gets the Nth column value from the Row; if the cell is absent,
+// an empty string is returned.
+func (r *Row) Col(i int) string {
+ serial := uint16(i)
+ if ch, ok := r.cols[serial]; ok {
+ strs := ch.String(r.wb)
+ return strs[0]
+ } else {
+ for _, v := range r.cols {
+ if v.FirstCol() <= serial && v.LastCol() >= serial {
+ strs := v.String(r.wb)
+ return strs[serial-v.FirstCol()]
+ }
+ }
+ }
+ return ""
+}
+
+// ColExact gets the Nth column value from the Row; if the cell is absent,
+// an empty string is returned. For merged cells the value is returned for the first cell only.
+func (r *Row) ColExact(i int) string {
+ serial := uint16(i)
+ if ch, ok := r.cols[serial]; ok {
+ strs := ch.String(r.wb)
+ return strs[0]
+ }
+ return ""
+}
+
+// LastCol gets the index of the last column of the Row.
+func (r *Row) LastCol() int {
+ return int(r.info.Lcell)
+}
+
+//FirstCol Get the number of First Col of the Row.
+func (r *Row) FirstCol() int {
+ return int(r.info.Fcell)
+}
diff --git a/godo/office/xls/sst.go b/godo/office/xls/sst.go
new file mode 100644
index 0000000..3c92e39
--- /dev/null
+++ b/godo/office/xls/sst.go
@@ -0,0 +1,6 @@
+package xls
+
+type SstInfo struct {
+ Total uint32
+ Count uint32
+}
diff --git a/godo/office/xls/workbook.go b/godo/office/xls/workbook.go
new file mode 100644
index 0000000..e0af09a
--- /dev/null
+++ b/godo/office/xls/workbook.go
@@ -0,0 +1,323 @@
+package xls
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "os"
+ "unicode/utf16"
+
+ "golang.org/x/text/encoding/charmap"
+)
+
+// xls workbook type
+type WorkBook struct {
+ Is5ver bool
+ Type uint16
+ Codepage uint16
+ Xfs []st_xf_data
+ Fonts []Font
+ Formats map[uint16]*Format
+ //All the sheets from the workbook
+ sheets []*WorkSheet
+ Author string
+ rs io.ReadSeeker
+ sst []string
+ continue_utf16 uint16
+ continue_rich uint16
+ continue_apsb uint32
+ dateMode uint16
+}
+
+// read workbook from ole2 file
+func newWorkBookFromOle2(rs io.ReadSeeker) *WorkBook {
+ wb := new(WorkBook)
+ wb.Formats = make(map[uint16]*Format)
+ // wb.bts = bts
+ wb.rs = rs
+ wb.sheets = make([]*WorkSheet, 0)
+ wb.Parse(rs)
+ return wb
+}
+
+func (w *WorkBook) Parse(buf io.ReadSeeker) {
+ b := new(bof)
+ bof_pre := new(bof)
+ // buf := bytes.NewReader(bts)
+ offset := 0
+ for {
+ if err := binary.Read(buf, binary.LittleEndian, b); err == nil {
+ bof_pre, b, offset = w.parseBof(buf, b, bof_pre, offset)
+ } else {
+ break
+ }
+ }
+}
+
+func (w *WorkBook) addXf(xf st_xf_data) {
+ w.Xfs = append(w.Xfs, xf)
+}
+
+func (w *WorkBook) addFont(font *FontInfo, buf io.ReadSeeker) {
+ name, _ := w.get_string(buf, uint16(font.NameB))
+ w.Fonts = append(w.Fonts, Font{Info: font, Name: name})
+}
+
+func (w *WorkBook) addFormat(format *Format) {
+ if w.Formats == nil {
+ os.Exit(1)
+ }
+ w.Formats[format.Head.Index] = format
+}
+
+func (wb *WorkBook) parseBof(buf io.ReadSeeker, b *bof, pre *bof, offset_pre int) (after *bof, after_using *bof, offset int) {
+ after = b
+ after_using = pre
+ var bts = make([]byte, b.Size)
+ binary.Read(buf, binary.LittleEndian, bts)
+ buf_item := bytes.NewReader(bts)
+ switch b.Id {
+ case 0x809:
+ bif := new(biffHeader)
+ binary.Read(buf_item, binary.LittleEndian, bif)
+ if bif.Ver != 0x600 {
+ wb.Is5ver = true
+ }
+ wb.Type = bif.Type
+ case 0x042: // CODEPAGE
+ binary.Read(buf_item, binary.LittleEndian, &wb.Codepage)
+ case 0x3c: // CONTINUE
+ if pre.Id == 0xfc {
+ var size uint16
+ var err error
+ if wb.continue_utf16 >= 1 {
+ size = wb.continue_utf16
+ wb.continue_utf16 = 0
+ } else {
+ err = binary.Read(buf_item, binary.LittleEndian, &size)
+ }
+ for err == nil && offset_pre < len(wb.sst) {
+ var str string
+ str, err = wb.get_string(buf_item, size)
+ wb.sst[offset_pre] = wb.sst[offset_pre] + str
+
+ if err == io.EOF {
+ break
+ }
+
+ offset_pre++
+ err = binary.Read(buf_item, binary.LittleEndian, &size)
+ }
+ }
+ offset = offset_pre
+ after = pre
+ after_using = b
+ case 0xfc: // SST
+ info := new(SstInfo)
+ binary.Read(buf_item, binary.LittleEndian, info)
+ wb.sst = make([]string, info.Count)
+ var size uint16
+ var i = 0
+ // don't forget to initialize offset
+ offset = 0
+ for ; i < int(info.Count); i++ {
+ var err error
+ err = binary.Read(buf_item, binary.LittleEndian, &size)
+ if err == nil {
+ var str string
+ str, err = wb.get_string(buf_item, size)
+ wb.sst[i] = wb.sst[i] + str
+ }
+
+ if err == io.EOF {
+ break
+ }
+ }
+ offset = i
+ case 0x85: // boundsheet
+ var bs = new(boundsheet)
+ binary.Read(buf_item, binary.LittleEndian, bs)
+ // different for BIFF5 and BIFF8
+ wb.addSheet(bs, buf_item)
+ case 0x0e0: // XF
+ if wb.Is5ver {
+ xf := new(Xf5)
+ binary.Read(buf_item, binary.LittleEndian, xf)
+ wb.addXf(xf)
+ } else {
+ xf := new(Xf8)
+ binary.Read(buf_item, binary.LittleEndian, xf)
+ wb.addXf(xf)
+ }
+ case 0x031: // FONT
+ f := new(FontInfo)
+ binary.Read(buf_item, binary.LittleEndian, f)
+ wb.addFont(f, buf_item)
+ case 0x41E: //FORMAT
+ font := new(Format)
+ binary.Read(buf_item, binary.LittleEndian, &font.Head)
+ font.str, _ = wb.get_string(buf_item, font.Head.Size)
+ wb.addFormat(font)
+ case 0x22: //DATEMODE
+ binary.Read(buf_item, binary.LittleEndian, &wb.dateMode)
+ }
+ return
+}
+func decodeWindows1251(enc []byte) string {
+ dec := charmap.Windows1251.NewDecoder()
+ out, _ := dec.Bytes(enc)
+ return string(out)
+}
+func (w *WorkBook) get_string(buf io.ReadSeeker, size uint16) (res string, err error) {
+ if w.Is5ver {
+ var bts = make([]byte, size)
+ _, err = buf.Read(bts)
+ res = decodeWindows1251(bts)
+ //res = string(bts)
+ } else {
+ var richtext_num = uint16(0)
+ var phonetic_size = uint32(0)
+ var flag byte
+ err = binary.Read(buf, binary.LittleEndian, &flag)
+ if flag&0x8 != 0 {
+ err = binary.Read(buf, binary.LittleEndian, &richtext_num)
+ } else if w.continue_rich > 0 {
+ richtext_num = w.continue_rich
+ w.continue_rich = 0
+ }
+ if flag&0x4 != 0 {
+ err = binary.Read(buf, binary.LittleEndian, &phonetic_size)
+ } else if w.continue_apsb > 0 {
+ phonetic_size = w.continue_apsb
+ w.continue_apsb = 0
+ }
+ if flag&0x1 != 0 {
+ var bts = make([]uint16, size)
+ var i = uint16(0)
+ for ; i < size && err == nil; i++ {
+ err = binary.Read(buf, binary.LittleEndian, &bts[i])
+ }
+
+ // when EOF is found, we don't want to append the last element
+ var runes []rune
+ if err == io.EOF {
+ i = i - 1
+ }
+ runes = utf16.Decode(bts[:i])
+
+ res = string(runes)
+ if i < size {
+ w.continue_utf16 = size - i
+ }
+
+ } else {
+ var bts = make([]byte, size)
+ var n int
+ n, err = buf.Read(bts)
+ if uint16(n) < size {
+ w.continue_utf16 = size - uint16(n)
+ err = io.EOF
+ }
+
+ var bts1 = make([]uint16, n)
+ for k, v := range bts[:n] {
+ bts1[k] = uint16(v)
+ }
+ runes := utf16.Decode(bts1)
+ res = string(runes)
+ }
+ if richtext_num > 0 {
+ var bts []byte
+ var seek_size int64
+ if w.Is5ver {
+ seek_size = int64(2 * richtext_num)
+ } else {
+ seek_size = int64(4 * richtext_num)
+ }
+ bts = make([]byte, seek_size)
+ err = binary.Read(buf, binary.LittleEndian, bts)
+ if err == io.EOF {
+ w.continue_rich = richtext_num
+ }
+
+ // err = binary.Read(buf, binary.LittleEndian, bts)
+ }
+ if phonetic_size > 0 {
+ bts := make([]byte, phonetic_size)
+ err = binary.Read(buf, binary.LittleEndian, bts)
+ if err == io.EOF {
+ w.continue_apsb = phonetic_size
+ }
+ }
+ }
+ return
+}
+
+func (w *WorkBook) addSheet(sheet *boundsheet, buf io.ReadSeeker) {
+ name, _ := w.get_string(buf, uint16(sheet.Name))
+ w.sheets = append(w.sheets, &WorkSheet{bs: sheet, Name: name, wb: w, Visibility: TWorkSheetVisibility(sheet.Visible)})
+}
+
+// prepareSheet reads a sheet from the compound file into memory; call this before trying to get anything from the sheet
+func (w *WorkBook) prepareSheet(sheet *WorkSheet) {
+ w.rs.Seek(int64(sheet.bs.Filepos), 0)
+ sheet.parse(w.rs)
+}
+
+// Get one sheet by its number
+func (w *WorkBook) GetSheet(num int) *WorkSheet {
+ if num < len(w.sheets) {
+ s := w.sheets[num]
+ if !s.parsed {
+ w.prepareSheet(s)
+ }
+ return s
+ } else {
+ return nil
+ }
+}
+
+// Get the number of all sheets, look into example
+func (w *WorkBook) NumSheets() int {
+ return len(w.sheets)
+}
+
+// ReadAllCells is a helper function that reads all cells from the file.
+// Notice: max limits the maximum number of rows returned.
+// Warning: this helper needs a lot of memory if the file is large.
+func (w *WorkBook) ReadAllCells(max int) (res [][]string) {
+ res = make([][]string, 0)
+ for _, sheet := range w.sheets {
+ if len(res) < max {
+ max = max - len(res)
+ w.prepareSheet(sheet)
+ if sheet.MaxRow != 0 {
+ leng := int(sheet.MaxRow) + 1
+ if max < leng {
+ leng = max
+ }
+ temp := make([][]string, leng)
+ for k, row := range sheet.rows {
+ data := make([]string, 0)
+ if len(row.cols) > 0 {
+ for _, col := range row.cols {
+ if uint16(len(data)) <= col.LastCol() {
+ data = append(data, make([]string, col.LastCol()-uint16(len(data))+1)...)
+ }
+ str := col.String(w)
+
+ for i := uint16(0); i < col.LastCol()-col.FirstCol()+1; i++ {
+ data[col.FirstCol()+i] = str[i]
+ }
+ }
+ if leng > int(k) {
+ temp[k] = data
+ }
+ }
+ }
+ res = append(res, temp...)
+ }
+ }
+ }
+ return
+}
diff --git a/godo/office/xls/worksheet.go b/godo/office/xls/worksheet.go
new file mode 100644
index 0000000..0d001d5
--- /dev/null
+++ b/godo/office/xls/worksheet.go
@@ -0,0 +1,251 @@
+package xls
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "unicode/utf16"
+)
+
+type TWorkSheetVisibility byte
+
+const (
+ WorkSheetVisible TWorkSheetVisibility = 0
+ WorkSheetHidden TWorkSheetVisibility = 1
+ WorkSheetVeryHidden TWorkSheetVisibility = 2
+)
+
+type boundsheet struct {
+ Filepos uint32
+ Visible byte
+ Type byte
+ Name byte
+}
+
+// WorkSheet in one WorkBook
+type WorkSheet struct {
+ bs *boundsheet
+ wb *WorkBook
+ Name string
+ Selected bool
+ Visibility TWorkSheetVisibility
+ rows map[uint16]*Row
+ // NOTICE: this is the max row index of the sheet, i.e. the row count - 1
+ MaxRow uint16
+ parsed bool
+ rightToLeft bool
+}
+
+func (w *WorkSheet) Row(i int) *Row {
+ row := w.rows[uint16(i)]
+ if row != nil {
+ row.wb = w.wb
+ }
+ return row
+}
+
+func (w *WorkSheet) parse(buf io.ReadSeeker) {
+ w.rows = make(map[uint16]*Row)
+ b := new(bof)
+ var bof_pre *bof
+ var col_pre interface{}
+ for {
+ if err := binary.Read(buf, binary.LittleEndian, b); err == nil {
+ bof_pre, col_pre = w.parseBof(buf, b, bof_pre, col_pre)
+ if b.Id == 0xa {
+ break
+ }
+ } else {
+ fmt.Println(err)
+ break
+ }
+ }
+ w.parsed = true
+}
+
+func (w *WorkSheet) parseBof(buf io.ReadSeeker, b *bof, _ *bof, col_pre interface{}) (*bof, interface{}) {
+ var col interface{}
+ var bts = make([]byte, b.Size)
+ binary.Read(buf, binary.LittleEndian, bts)
+ buf = bytes.NewReader(bts)
+ switch b.Id {
+ // case 0x0E5: //MERGEDCELLS
+ // ws.mergedCells(buf)
+ case 0x23E: // WINDOW2
+ var sheetOptions, firstVisibleRow, firstVisibleColumn uint16
+ binary.Read(buf, binary.LittleEndian, &sheetOptions)
+ binary.Read(buf, binary.LittleEndian, &firstVisibleRow) // not valuable
+ binary.Read(buf, binary.LittleEndian, &firstVisibleColumn) // not valuable
+ //buf.Seek(int64(b.Size)-2*3, 1)
+ w.rightToLeft = (sheetOptions & 0x40) != 0
+ w.Selected = (sheetOptions & 0x400) != 0
+ case 0x208: //ROW
+ r := new(rowInfo)
+ binary.Read(buf, binary.LittleEndian, r)
+ w.addRow(r)
+ case 0x0BD: //MULRK
+ mc := new(MulrkCol)
+ size := (b.Size - 6) / 6
+ binary.Read(buf, binary.LittleEndian, &mc.Col)
+ mc.Xfrks = make([]XfRk, size)
+ for i := uint16(0); i < size; i++ {
+ binary.Read(buf, binary.LittleEndian, &mc.Xfrks[i])
+ }
+ binary.Read(buf, binary.LittleEndian, &mc.LastColB)
+ col = mc
+ case 0x0BE: //MULBLANK
+ mc := new(MulBlankCol)
+ size := (b.Size - 6) / 2
+ binary.Read(buf, binary.LittleEndian, &mc.Col)
+ mc.Xfs = make([]uint16, size)
+ for i := uint16(0); i < size; i++ {
+ binary.Read(buf, binary.LittleEndian, &mc.Xfs[i])
+ }
+ binary.Read(buf, binary.LittleEndian, &mc.LastColB)
+ col = mc
+ case 0x203: //NUMBER
+ col = new(NumberCol)
+ binary.Read(buf, binary.LittleEndian, col)
+ case 0x06: //FORMULA
+ c := new(FormulaCol)
+ binary.Read(buf, binary.LittleEndian, &c.Header)
+ c.Bts = make([]byte, b.Size-20)
+ binary.Read(buf, binary.LittleEndian, &c.Bts)
+ col = c
+ case 0x207: //STRING = FORMULA-VALUE is expected right after FORMULA
+ if ch, ok := col_pre.(*FormulaCol); ok {
+ c := new(FormulaStringCol)
+ c.Col = ch.Header.Col
+ var cStringLen uint16
+ binary.Read(buf, binary.LittleEndian, &cStringLen)
+ str, err := w.wb.get_string(buf, cStringLen)
+ if nil == err {
+ c.RenderedValue = str
+ }
+ col = c
+ }
+ case 0x27e: //RK
+ col = new(RkCol)
+ binary.Read(buf, binary.LittleEndian, col)
+ case 0xFD: //LABELSST
+ col = new(LabelsstCol)
+ binary.Read(buf, binary.LittleEndian, col)
+ case 0x204:
+ c := new(labelCol)
+ binary.Read(buf, binary.LittleEndian, &c.BlankCol)
+ var count uint16
+ binary.Read(buf, binary.LittleEndian, &count)
+ c.Str, _ = w.wb.get_string(buf, count)
+ col = c
+ case 0x201: //BLANK
+ col = new(BlankCol)
+ binary.Read(buf, binary.LittleEndian, col)
+ case 0x1b8: //HYPERLINK
+ var hy HyperLink
+ binary.Read(buf, binary.LittleEndian, &hy.CellRange)
+ buf.Seek(20, 1)
+ var flag uint32
+ binary.Read(buf, binary.LittleEndian, &flag)
+ var count uint32
+
+ if flag&0x14 != 0 {
+ binary.Read(buf, binary.LittleEndian, &count)
+ hy.Description = b.utf16String(buf, count)
+ }
+ if flag&0x80 != 0 {
+ binary.Read(buf, binary.LittleEndian, &count)
+ hy.TargetFrame = b.utf16String(buf, count)
+ }
+ if flag&0x1 != 0 {
+ var guid [2]uint64
+ binary.Read(buf, binary.BigEndian, &guid)
+ if guid[0] == 0xE0C9EA79F9BACE11 && guid[1] == 0x8C8200AA004BA90B { //URL
+ hy.IsUrl = true
+ binary.Read(buf, binary.LittleEndian, &count)
+ hy.Url = b.utf16String(buf, count/2)
+ } else if guid[0] == 0x303000000000000 && guid[1] == 0xC000000000000046 { // file moniker (local file path)
+ var upCount uint16
+ binary.Read(buf, binary.LittleEndian, &upCount)
+ binary.Read(buf, binary.LittleEndian, &count)
+ bts := make([]byte, count)
+ binary.Read(buf, binary.LittleEndian, &bts)
+ hy.ShortedFilePath = string(bts)
+ buf.Seek(24, 1)
+ binary.Read(buf, binary.LittleEndian, &count)
+ if count > 0 {
+ binary.Read(buf, binary.LittleEndian, &count)
+ buf.Seek(2, 1)
+ hy.ExtendedFilePath = b.utf16String(buf, count/2+1)
+ }
+ }
+ }
+ if flag&0x8 != 0 {
+ binary.Read(buf, binary.LittleEndian, &count)
+ var bts = make([]uint16, count)
+ binary.Read(buf, binary.LittleEndian, &bts)
+ runes := utf16.Decode(bts[:len(bts)-1])
+ hy.TextMark = string(runes)
+ }
+
+ w.addRange(&hy.CellRange, &hy)
+ case 0x809:
+ buf.Seek(int64(b.Size), 1)
+ case 0xa:
+ default:
+ // log.Printf("Unknow %X,%d\n", b.Id, b.Size)
+ buf.Seek(int64(b.Size), 1)
+ }
+ if col != nil {
+ w.add(col)
+ }
+ return b, col
+}
+
+func (w *WorkSheet) add(content interface{}) {
+ if ch, ok := content.(contentHandler); ok {
+ if col, ok := content.(Coler); ok {
+ w.addCell(col, ch)
+ }
+ }
+
+}
+
+func (w *WorkSheet) addCell(col Coler, ch contentHandler) {
+ w.addContent(col.Row(), ch)
+}
+
+func (w *WorkSheet) addRange(rang Ranger, ch contentHandler) {
+
+ for i := rang.FirstRow(); i <= rang.LastRow(); i++ {
+ w.addContent(i, ch)
+ }
+}
+
+func (w *WorkSheet) addContent(row_num uint16, ch contentHandler) {
+ var row *Row
+ var ok bool
+ if row, ok = w.rows[row_num]; !ok {
+ info := new(rowInfo)
+ info.Index = row_num
+ row = w.addRow(info)
+ }
+ if row.info.Lcell < ch.LastCol() {
+ row.info.Lcell = ch.LastCol()
+ }
+ row.cols[ch.FirstCol()] = ch
+}
+
+func (w *WorkSheet) addRow(info *rowInfo) (row *Row) {
+ if info.Index > w.MaxRow {
+ w.MaxRow = info.Index
+ }
+ var ok bool
+ if row, ok = w.rows[info.Index]; ok {
+ row.info = info
+ } else {
+ row = &Row{info: info, cols: make(map[uint16]contentHandler)}
+ w.rows[info.Index] = row
+ }
+ return
+}
diff --git a/godo/office/xls/xf.go b/godo/office/xls/xf.go
new file mode 100644
index 0000000..8f4dd1e
--- /dev/null
+++ b/godo/office/xls/xf.go
@@ -0,0 +1,37 @@
+package xls
+
+type Xf5 struct {
+ Font uint16
+ Format uint16
+ Type uint16
+ Align uint16
+ Color uint16
+ Fill uint16
+ Border uint16
+ Linestyle uint16
+}
+
+func (x *Xf5) formatNo() uint16 {
+ return x.Format
+}
+
+type Xf8 struct {
+ Font uint16
+ Format uint16
+ Type uint16
+ Align byte
+ Rotation byte
+ Ident byte
+ Usedattr byte
+ Linestyle uint32
+ Linecolor uint32
+ Groundcolor uint16
+}
+
+func (x *Xf8) formatNo() uint16 {
+ return x.Format
+}
+
+type st_xf_data interface {
+ formatNo() uint16
+}
diff --git a/godo/office/xls/xls.go b/godo/office/xls/xls.go
new file mode 100644
index 0000000..c8a69ef
--- /dev/null
+++ b/godo/office/xls/xls.go
@@ -0,0 +1,61 @@
+package xls
+
+import (
+ "io"
+ "os"
+
+ "godo/office/ole2"
+)
+
+// Open one xls file
+func Open(file string, charset string) (*WorkBook, error) {
+ if fi, err := os.Open(file); err == nil {
+ return OpenReader(fi, charset)
+ } else {
+ return nil, err
+ }
+}
+
+// Open one xls file and return the closer
+func OpenWithCloser(file string, charset string) (*WorkBook, io.Closer, error) {
+ if fi, err := os.Open(file); err == nil {
+ wb, err := OpenReader(fi, charset)
+ return wb, fi, err
+ } else {
+ return nil, nil, err
+ }
+}
+
+// Open xls file from reader
+func OpenReader(reader io.ReadSeeker, charset string) (wb *WorkBook, err error) {
+ var ole *ole2.Ole
+ if ole, err = ole2.Open(reader, charset); err == nil {
+ var dir []*ole2.File
+ if dir, err = ole.ListDir(); err == nil {
+ var book *ole2.File
+ var root *ole2.File
+ for _, file := range dir {
+ name := file.Name()
+ if name == "Workbook" {
+ if book == nil {
+ book = file
+ }
+ //book = file
+ // break
+ }
+ if name == "Book" {
+ book = file
+ // break
+ }
+ if name == "Root Entry" {
+ root = file
+ }
+ }
+ if book != nil {
+ wb = newWorkBookFromOle2(ole.OpenFile(book, root))
+ return
+ }
+ }
+ }
+ return
+}
diff --git a/godo/office/xls/xls_test.go b/godo/office/xls/xls_test.go
new file mode 100644
index 0000000..57c809c
--- /dev/null
+++ b/godo/office/xls/xls_test.go
@@ -0,0 +1,28 @@
+package xls
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestOpen(t *testing.T) {
+ if xlFile, err := Open("t1.xls", "utf-8"); err == nil {
+ if sheet1 := xlFile.GetSheet(0); sheet1 != nil {
+ fmt.Println("Total Lines ", sheet1.MaxRow, sheet1.Name)
+ for i := 265; i <= 267; i++ {
+ fmt.Printf("row %v point %v \n", i, sheet1.Row(i))
+ if sheet1.Row(i) == nil {
+ continue
+ }
+ row := sheet1.Row(i)
+ for index := row.FirstCol(); index < row.LastCol(); index++ {
+ fmt.Println(index, "==>", row.Col(index), " ")
+ fmt.Printf("%T\n", row.cols[uint16(index)])
+ }
+ // col1 := .Cols[0]
+ // col2 := sheet1.Row(uint16(i)].Cols[1]
+ // fmt.Printf("\ncol1 %v \nCol2 %v \n", col1.String(xlFile), col2.String(xlFile))
+ }
+ }
+ }
+}
diff --git a/godo/office/xlsx/LICENSE b/godo/office/xlsx/LICENSE
new file mode 100644
index 0000000..26adc59
--- /dev/null
+++ b/godo/office/xlsx/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 The DataShed
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/godo/office/xlsx/README.md b/godo/office/xlsx/README.md
new file mode 100644
index 0000000..ef185bc
--- /dev/null
+++ b/godo/office/xlsx/README.md
@@ -0,0 +1,98 @@
+
+
+# xlsxreader: A Go Package for reading data from an xlsx file
+
+## Overview
+
+[Go Reference](https://pkg.go.dev/github.com/thedatashed/xlsxreader)
+[Go Report Card](https://goreportcard.com/report/github.com/thedatashed/xlsxreader)
+
+A low-memory high performance library for reading data from an xlsx file.
+
+Suitable for reading .xlsx data and designed to aid with the bulk uploading of data where the key requirement is to parse and read raw data.
+
+The reader will read data out row by row (1->n) and has no concept of headers or data types (this is to be managed by the consumer).
+
+The reader is currently not concerned with handling some of the more advanced cell data that can be stored in a xlsx file.
+
+Further reading on how this came to be is available on our [blog](https://www.thedatashed.co.uk/2019/02/13/go-shedsheet-reader/)
+
+## Install
+
+```
+go get github.com/thedatashed/xlsxreader
+```
+
+## Example Usage
+
+Reading from the file system:
+
+```go
+package main
+
+import (
+ "github.com/thedatashed/xlsxreader"
+)
+
+func main() {
+ // Create an instance of the reader by opening a target file
+ xl, _ := xlsxreader.OpenFile("./test.xlsx")
+
+ // Ensure the file reader is closed once utilised
+ defer xl.Close()
+
+ // Iterate on the rows of data
+ for row := range xl.ReadRows(xl.Sheets[0]){
+ ...
+ }
+}
+```
+
+Reading from an already in-memory source
+
+```go
+package main
+
+import (
+ "io/ioutil"
+ "os"
+
+ "github.com/thedatashed/xlsxreader"
+)
+
+func main() {
+
+ // Preprocessing of file data
+ file, _ := os.Open("./test/test-small.xlsx")
+ defer file.Close()
+ bytes, _ := ioutil.ReadAll(file)
+
+ // Create an instance of the reader by providing a data stream
+ xl, _ := xlsxreader.NewReader(bytes)
+
+ // Iterate on the rows of data
+ for row := range xl.ReadRows(xl.Sheets[0]){
+ ...
+ }
+}
+```
+
+## Key Concepts
+
+### Files
+
+The reader operates on a single file and will read data from the specified file using the `OpenFile` function.
+
+### Data
+
+The Reader can also be instantiated with a byte array by using the `NewReader` function.
+
+### Sheets
+
+An xlsx workbook can contain many worksheets. When reading data, the target sheet name should be passed. To process multiple sheets, either iterate over the array of sheet names identified by the reader or make multiple calls to the `ReadRows` function with the desired sheet names (see the sketch at the end of this README).
+
+### Rows
+
+A sheet contains n rows of data. The reader returns an iterator that can be used to cycle through each row of data in a worksheet. Each row holds an index and contains n cells that hold the column data.
+
+### Cells
+
+A cell represents a row/column value and contains a string representation of that data. Currently numeric data is parsed as found, with dates parsed to ISO 8601 / RFC3339 format.
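+
+As a rough sketch (error handling elided; assumes a local `./test.xlsx`), iterating every sheet and inspecting cell types could look like this:
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/thedatashed/xlsxreader"
+)
+
+func main() {
+    // Open the file and make sure it is closed once we are done.
+    xl, err := xlsxreader.OpenFile("./test.xlsx")
+    if err != nil {
+        panic(err)
+    }
+    defer xl.Close()
+
+    // Process every worksheet by iterating over the discovered sheet names.
+    for _, sheet := range xl.Sheets {
+        for row := range xl.ReadRows(sheet) {
+            if row.Error != nil {
+                continue
+            }
+            for _, cell := range row.Cells {
+                // Column is the letter reference (e.g. "A"); Type is string, numerical, datetime or boolean.
+                fmt.Println(sheet, row.Index, cell.Column, cell.Type, cell.Value)
+            }
+        }
+    }
+}
+```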
diff --git a/godo/office/xlsx/date.go b/godo/office/xlsx/date.go
new file mode 100644
index 0000000..0d2cfcf
--- /dev/null
+++ b/godo/office/xlsx/date.go
@@ -0,0 +1,42 @@
+package xlsx
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "time"
+)
+
+// nanoSecondsPerDay defines a constant value of the number of nanoseconds in a day.
+const nanoSecondsPerDay = 24 * 60 * 60 * 1000 * 1000 * 1000
+
+// excelEpoch specifies the epoch of all excel dates. Dates are internally represented
+// as relative to this value.
+var excelEpoch = time.Date(1899, 12, 30, 0, 0, 0, 0, time.UTC)
+
+// convertExcelDateToDateString takes an excel numeric representation of a date, and
+// converts it to a human-readable RFC3339 or ISO formatted string.
+//
+// Excel dates are stored within excel as a signed floating point number.
+// The integer portion determines the number of days ahead of 30/12/1899 the date is.
+// The portion after the decimal point represents the proportion through the day.
+// For example, 6am would be 1/4 of the way through a 24hr day, so it is stored as 0.25.
+func convertExcelDateToDateString(value string) (string, error) {
+ floatValue, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return "", fmt.Errorf("unable to parse date float value: %w", err)
+ }
+
+ numberOfDays := math.Trunc(floatValue)
+ numberOfNanoSeconds := (floatValue - numberOfDays) * nanoSecondsPerDay
+
+ actualTime := excelEpoch.AddDate(0, 0, int(numberOfDays)).Add(time.Duration(numberOfNanoSeconds))
+
+ formatString := time.RFC3339
+ if floatValue == numberOfDays {
+ // We are dealing with a date, and not a datetime
+ formatString = "2006-01-02"
+ }
+
+ return actualTime.Format(formatString), nil
+}
diff --git a/godo/office/xlsx/file.go b/godo/office/xlsx/file.go
new file mode 100644
index 0000000..fdeed0d
--- /dev/null
+++ b/godo/office/xlsx/file.go
@@ -0,0 +1,171 @@
+package xlsx
+
+import (
+ "archive/zip"
+ "bytes"
+ "fmt"
+ "io"
+ "sync"
+)
+
+// XlsxFile defines a populated XLSX file struct.
+type XlsxFile struct {
+ Sheets []string
+
+ sheetFiles map[string]*zip.File
+ sharedStrings []string
+ dateStyles map[int]bool
+
+ doneCh chan struct{} // doneCh serves as a signal to abort unfinished operations.
+}
+
+// XlsxFileCloser wraps XlsxFile to be able to close an open file
+type XlsxFileCloser struct {
+ zipReadCloser *zip.ReadCloser
+ XlsxFile
+
+ once sync.Once // once performs actions exactly once, e.g. closing a channel.
+}
+
+// getFileForName finds and returns a *zip.File by its display name from within an archive.
+// If the file cannot be found, an error is returned.
+func getFileForName(files []*zip.File, name string) (*zip.File, error) {
+ for _, file := range files {
+ if file.Name == name {
+ return file, nil
+ }
+ }
+
+ return nil, fmt.Errorf("file not found: %s", name)
+}
+
+// readFile opens and reads the entire contents of a *zip.File into memory.
+// If the file cannot be opened, or the data cannot be read, an error is returned.
+func readFile(file *zip.File) ([]byte, error) {
+ rc, err := file.Open()
+ if err != nil {
+ return []byte{}, fmt.Errorf("unable to open file: %w", err)
+ }
+ defer rc.Close()
+
+ buff := bytes.NewBuffer(nil)
+ _, err = io.Copy(buff, rc)
+ if err != nil {
+ return []byte{}, fmt.Errorf("unable to copy bytes: %w", err)
+ }
+ return buff.Bytes(), nil
+}
+
+// GetSheetFileForSheetName returns the sheet file associated with the sheet name.
+// This is useful when you want to further process something out of the sheet, that this
+// library does not handle. For example this is useful when trying to read the hyperlinks
+// section of a sheet file; getting the sheet file enables you to read the XML directly.
+func (xl *XlsxFileCloser) GetSheetFileForSheetName(sheetName string) *zip.File {
+ sheetFile := xl.sheetFiles[sheetName]
+ return sheetFile
+}
+
+// Close closes the XlsxFile, rendering it unusable for I/O.
+func (xl *XlsxFileCloser) Close() error {
+ if xl == nil {
+ return nil
+ }
+ xl.once.Do(func() { close(xl.doneCh) })
+ return xl.zipReadCloser.Close()
+}
+
+// OpenFile takes the name of an XLSX file and returns a populated XlsxFile struct for it.
+// If the file cannot be found, or key parts of the file's contents are missing, an error
+// is returned.
+// Note that the file must be Close()-d when you are finished with it.
+func OpenFile(filename string) (*XlsxFileCloser, error) {
+ zipFile, err := zip.OpenReader(filename)
+ if err != nil {
+ return nil, fmt.Errorf("unable to open file reader: %w", err)
+ }
+
+ x := XlsxFile{}
+ if err := x.init(&zipFile.Reader); err != nil {
+ zipFile.Close()
+ return nil, fmt.Errorf("unable to initialise file: %w", err)
+ }
+
+ return &XlsxFileCloser{
+ XlsxFile: x,
+ zipReadCloser: zipFile,
+ }, nil
+}
+
+// OpenReaderZip takes the zip ReadCloser of an XLSX file and returns a populated XlsxFileCloser struct for it.
+// If the file cannot be found, or key parts of the file's contents are missing, an error
+// is returned.
+// Note that the file must be Close()-d when you are finished with it.
+func OpenReaderZip(rc *zip.ReadCloser) (*XlsxFileCloser, error) {
+ x := XlsxFile{}
+
+ if err := x.init(&rc.Reader); err != nil {
+ rc.Close()
+ return nil, err
+ }
+
+ return &XlsxFileCloser{
+ XlsxFile: x,
+ zipReadCloser: rc,
+ }, nil
+}
+
+// NewReader takes bytes of Xlsx file and returns a populated XlsxFile struct for it.
+// If the file cannot be found, or key parts of the file's contents are missing, an error
+// is returned.
+func NewReader(xlsxBytes []byte) (*XlsxFile, error) {
+ r, err := zip.NewReader(bytes.NewReader(xlsxBytes), int64(len(xlsxBytes)))
+ if err != nil {
+ return nil, fmt.Errorf("unable to create new reader: %w", err)
+ }
+
+ x := XlsxFile{}
+ err = x.init(r)
+ if err != nil {
+ return nil, fmt.Errorf("unable to initialise file: %w", err)
+ }
+
+ return &x, nil
+}
+
+// NewReaderZip takes zip reader of Xlsx file and returns a populated XlsxFile struct for it.
+// If the file cannot be found, or key parts of the file's contents are missing, an error
+// is returned.
+func NewReaderZip(r *zip.Reader) (*XlsxFile, error) {
+ x := XlsxFile{}
+
+ if err := x.init(r); err != nil {
+ return nil, fmt.Errorf("unable to initialise file: %w", err)
+ }
+
+ return &x, nil
+}
+
+func (x *XlsxFile) init(zipReader *zip.Reader) error {
+ sharedStrings, err := getSharedStrings(zipReader.File)
+ if err != nil {
+ return fmt.Errorf("unable to get shared strings: %w", err)
+ }
+
+ sheets, sheetFiles, err := getWorksheets(zipReader.File)
+ if err != nil {
+ return fmt.Errorf("unable to get worksheets: %w", err)
+ }
+
+ dateStyles, err := getDateFormatStyles(zipReader.File)
+ if err != nil {
+ return fmt.Errorf("unable to get date styles: %w", err)
+ }
+
+ x.sharedStrings = sharedStrings
+ x.Sheets = sheets
+ x.sheetFiles = *sheetFiles
+ x.dateStyles = *dateStyles
+ x.doneCh = make(chan struct{})
+
+ return nil
+}
diff --git a/godo/office/xlsx/rows.go b/godo/office/xlsx/rows.go
new file mode 100644
index 0000000..75dce97
--- /dev/null
+++ b/godo/office/xlsx/rows.go
@@ -0,0 +1,398 @@
+package xlsx
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// rawRow represents the raw XML element for parsing a row of data.
+type rawRow struct {
+ Index int `xml:"r,attr,omitempty"`
+ RawCells []rawCell `xml:"c"`
+}
+
+func (rr *rawRow) unmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ for _, attr := range start.Attr {
+ if attr.Name.Local != "r" {
+ continue
+ }
+
+ var err error
+
+ if rr.Index, err = strconv.Atoi(attr.Value); err != nil {
+ return fmt.Errorf("unable to parse row index: %w", err)
+ }
+ }
+
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ return fmt.Errorf("error retrieving xml token: %w", err)
+ }
+
+ var se xml.StartElement
+
+ switch el := tok.(type) {
+ case xml.StartElement:
+ se = el
+ case xml.EndElement:
+ if el == start.End() {
+ return nil
+ }
+ default:
+ continue
+ }
+
+ if se.Name.Local != "c" {
+ continue
+ }
+
+ var rc rawCell
+ if err = rc.unmarshalXML(d, se); err != nil {
+ return fmt.Errorf("unable to unmarshal cell: %w", err)
+ }
+
+ rr.RawCells = append(rr.RawCells, rc)
+ }
+}
+
+// rawCell represents the raw XML element for parsing a cell.
+type rawCell struct {
+ Reference string `xml:"r,attr"` // E.g. A1
+ Type string `xml:"t,attr,omitempty"`
+ Value *string `xml:"v,omitempty"`
+ Style int `xml:"s,attr"`
+ InlineString *string `xml:"is>t"`
+}
+
+func (rc *rawCell) unmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ // unmarshal attributes
+ for _, attr := range start.Attr {
+ switch attr.Name.Local {
+ case "r":
+ rc.Reference = attr.Value
+ case "t":
+ rc.Type = attr.Value
+ case "s":
+ var err error
+
+ if rc.Style, err = strconv.Atoi(attr.Value); err != nil {
+ return err
+ }
+ }
+ }
+
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ return fmt.Errorf("error retrieving xml token: %w", err)
+ }
+
+ var se xml.StartElement
+
+ switch el := tok.(type) {
+ case xml.StartElement:
+ se = el
+ case xml.EndElement:
+ if el == start.End() {
+ return nil
+ }
+ continue
+ default:
+ continue
+ }
+
+ switch se.Name.Local {
+ case "is":
+ err = rc.unmarshalInlineString(d, se)
+ case "v":
+ var v string
+
+ if v, err = getCharData(d); err != nil {
+ return err
+ }
+
+ rc.Value = &v
+ default:
+ continue
+ }
+
+ if err != nil {
+ return fmt.Errorf("unable to parse cell data: %w", err)
+ }
+ }
+}
+
+func (rc *rawCell) unmarshalInlineString(d *xml.Decoder, start xml.StartElement) error {
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ return fmt.Errorf("error retrieving xml token: %w", err)
+ }
+
+ var se xml.StartElement
+
+ switch el := tok.(type) {
+ case xml.StartElement:
+ se = el
+ case xml.EndElement:
+ if el == start.End() {
+ return nil
+ }
+ continue
+ default:
+ continue
+ }
+
+ if se.Name.Local != "t" {
+ continue
+ }
+
+ v, err := getCharData(d)
+ if err != nil {
+ return fmt.Errorf("unable to parse string: %w", err)
+ }
+
+ rc.InlineString = &v
+ return nil
+ }
+}
+
+// Row represents a row of data read from an Xlsx file, in a consumable format
+type Row struct {
+ Error error
+ Index int
+ Cells []Cell
+}
+
+// Cell represents the data in a single cell as a consumable format.
+type Cell struct {
+ Column string // e.g. A, B, C
+ Row int
+ Value string
+ Type CellType
+}
+
+// CellType defines the data type of an excel cell
+type CellType string
+
+const (
+ // TypeString is for text cells
+ TypeString CellType = "string"
+ // TypeNumerical is for numerical values
+ TypeNumerical CellType = "numerical"
+ // TypeDateTime is for date values
+ TypeDateTime CellType = "datetime"
+ // TypeBoolean is for true/false values
+ TypeBoolean CellType = "boolean"
+)
+
+// ColumnIndex gives a number, representing the column the cell lies beneath.
+func (c Cell) ColumnIndex() int {
+ return asIndex(c.Column)
+}
+
+// getCellValue interrogates a raw cell to get a textual representation of the cell's contents.
+// Numerical values are returned in their string format.
+// Dates are returned as an ISO YYYY-MM-DD formatted string.
+// Datetimes are returned in RFC3339 (ISO-8601) YYYY-MM-DDTHH:MM:SSZ formatted string.
+func (x *XlsxFile) getCellValue(r rawCell) (string, error) {
+ if r.Type == "inlineStr" {
+ if r.InlineString == nil {
+ return "", fmt.Errorf("cell had type of InlineString, but the InlineString attribute was missing")
+ }
+ return *r.InlineString, nil
+ }
+
+ if r.Value == nil {
+ return "", fmt.Errorf("unable to get cell value for cell %s - no value element found", r.Reference)
+ }
+
+ if r.Type == "s" {
+ index, err := strconv.Atoi(*r.Value)
+ if err != nil {
+ return "", err
+ }
+ if len(x.sharedStrings) <= index {
+ return "", fmt.Errorf("attempted to index value %d in shared strings of length %d",
+ index, len(x.sharedStrings))
+ }
+
+ return x.sharedStrings[index], nil
+ }
+
+ if x.dateStyles[r.Style] && r.Type != "d" {
+ formattedDate, err := convertExcelDateToDateString(*r.Value)
+ if err != nil {
+ return "", err
+ }
+ return formattedDate, nil
+ }
+
+ return *r.Value, nil
+}
+
+func (x *XlsxFile) getCellType(r rawCell) CellType {
+ if x.dateStyles[r.Style] {
+ return TypeDateTime
+ }
+
+ switch r.Type {
+ case "b":
+ return TypeBoolean
+ case "d":
+ return TypeDateTime
+ case "n", "":
+ return TypeNumerical
+ case "s", "inlineStr":
+ return TypeString
+ default:
+ return TypeString
+ }
+}
+
+// readSheetRows iterates over "row" elements within a worksheet,
+// pushing a parsed Row struct into a channel for each one.
+func (x *XlsxFile) readSheetRows(sheet string, ch chan<- Row) {
+ defer close(ch)
+
+ xmlFile, err := x.openSheetFile(sheet)
+ if err != nil {
+ select {
+ case <-x.doneCh:
+ case ch <- Row{Error: err}:
+ }
+ return
+ }
+ defer xmlFile.Close()
+
+ decoder := xml.NewDecoder(xmlFile)
+ for {
+ token, _ := decoder.Token()
+ if token == nil {
+ return
+ }
+
+ switch startElement := token.(type) {
+ case xml.StartElement:
+ if startElement.Name.Local == "row" {
+ row := x.parseRow(decoder, &startElement)
+ if len(row.Cells) < 1 && row.Error == nil {
+ continue
+ }
+ select {
+ case <-x.doneCh:
+ return
+ case ch <- row:
+ }
+ }
+ }
+ }
+}
+
+func (x *XlsxFile) openSheetFile(sheet string) (io.ReadCloser, error) {
+ file, ok := x.sheetFiles[sheet]
+ if !ok {
+ return nil, fmt.Errorf("unable to open sheet %s", sheet)
+ }
+ return file.Open()
+}
+
+// parseRow parses the raw XML of a row element into a consumable Row struct.
+// The Row struct returned will contain any errors that occurred either in
+// interrogating values, or in parsing the XML.
+func (x *XlsxFile) parseRow(decoder *xml.Decoder, startElement *xml.StartElement) Row {
+ var r rawRow
+ err := r.unmarshalXML(decoder, *startElement)
+ if err != nil {
+ return Row{
+ Error: err,
+ Index: r.Index,
+ }
+ }
+
+ cells, err := x.parseRawCells(r.RawCells, r.Index)
+ if err != nil {
+ return Row{
+ Error: err,
+ Index: r.Index,
+ }
+ }
+ return Row{
+ Cells: cells,
+ Index: r.Index,
+ }
+}
+
+// parseRawCells converts a slice of structs containing a raw representation of the XML into
+// a standardised slice of Cell structs. An error will be returned if it is not possible
+// to interpret the value of any of the cells.
+func (x *XlsxFile) parseRawCells(rawCells []rawCell, index int) ([]Cell, error) {
+ cells := []Cell{}
+ for _, rawCell := range rawCells {
+ if rawCell.Value == nil && rawCell.InlineString == nil {
+ // This cell is empty, so ignore it
+ continue
+ }
+ column := strings.Map(removeNonAlpha, rawCell.Reference)
+ val, err := x.getCellValue(rawCell)
+ if err != nil {
+ return nil, err
+ }
+
+ cells = append(cells, Cell{
+ Column: column,
+ Row: index,
+ Value: val,
+ Type: x.getCellType(rawCell),
+ })
+ }
+
+ return cells, nil
+}
+
+// ReadRows provides an interface allowing rows from a specific worksheet to be streamed
+// from an xlsx file.
+// In order to provide a simplistic interface, this method returns a channel that can be
+// range-d over.
+//
+// If you want to read only some of the values, please ensure that the Close() method is
+// called after processing the entire file to stop all active goroutines and prevent any
+// potential goroutine leaks.
+//
+// Notes:
+// Xlsx sheets may omit cells which are empty, meaning a row may not have continuous cell
+// references. This function makes no attempt to fill/pad the missing cells.
+func (x *XlsxFile) ReadRows(sheet string) chan Row {
+ rowChannel := make(chan Row)
+ go x.readSheetRows(sheet, rowChannel)
+ return rowChannel
+}
+
+// removeNonAlpha is used in combination with strings.Map to remove any non alpha-numeric
+// characters from a cell reference, returning just the column name in a consistent uppercase format.
+// For example, a11 -> A, AA1 -> AA
+func removeNonAlpha(r rune) rune {
+ if 'A' <= r && r <= 'Z' {
+ return r
+ }
+ if 'a' <= r && r <= 'z' {
+ // make it uppercase
+ return r - 32
+ }
+ // drop the rune
+ return -1
+}
+
+// cell name to cell index. 'A' -> 0, 'Z' -> 25, 'AA' -> 26
+func asIndex(s string) int {
+ index := 0
+ for _, c := range s {
+ index *= 26
+ index += int(c) - 'A' + 1
+ }
+ return index - 1
+}
diff --git a/godo/office/xlsx/shared.go b/godo/office/xlsx/shared.go
new file mode 100644
index 0000000..e560855
--- /dev/null
+++ b/godo/office/xlsx/shared.go
@@ -0,0 +1,203 @@
+package xlsx
+
+import (
+ "archive/zip"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// sharedStringsValue is a struct that holds the value of the shared strings.
+type sharedStringsValue struct {
+ Text string `xml:"t"`
+ RichText []string `xml:"r>t"`
+}
+
+func (sv *sharedStringsValue) unmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ return fmt.Errorf("error retrieving xml token: %w", err)
+ }
+
+ var se xml.StartElement
+
+ switch el := tok.(type) {
+ case xml.EndElement:
+ if el == start.End() {
+ return nil
+ }
+ continue
+ case xml.StartElement:
+ se = el
+ default:
+ continue
+ }
+
+ switch se.Name.Local {
+ case "t":
+ sv.Text, err = getCharData(d)
+ case "r":
+ err = sv.decodeRichText(d, se)
+ default:
+ continue
+ }
+
+ if err != nil {
+ return fmt.Errorf("unable to parse string: %w", err)
+ }
+ }
+}
+
+func (sv *sharedStringsValue) decodeRichText(d *xml.Decoder, start xml.StartElement) error {
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ return fmt.Errorf("unable to get shared strings value token: %w", err)
+ }
+
+ var se xml.StartElement
+
+ switch el := tok.(type) {
+ case xml.StartElement:
+ se = el
+ case xml.EndElement:
+ if el == start.End() {
+ return nil
+ }
+ continue
+ default:
+ continue
+ }
+
+ if se.Name.Local != "t" {
+ continue
+ }
+
+ var s string
+
+ if s, err = getCharData(d); err != nil {
+ return fmt.Errorf("unable to parse string: %w", err)
+ }
+
+ sv.RichText = append(sv.RichText, s)
+ }
+}
+
+// String gets a string value from the raw sharedStringsValue struct.
+// Since the values can appear in many different places in the xml structure, we need to normalise this.
+// They can either be:
+// <si> <t> value </t> </si>
+// or
+// <si> <r> <t> val </t> </r> <r> <t> ue </t> </r> </si>
+func (sv *sharedStringsValue) String() string {
+ // fast path: no rich text, just return text
+ if len(sv.RichText) == 0 {
+ return sv.Text
+ }
+
+ var sb strings.Builder
+ for _, t := range sv.RichText {
+ sb.WriteString(t)
+ }
+
+ return sb.String()
+}
+
+// Reset zeroes data inside struct.
+func (sv *sharedStringsValue) Reset() {
+ sv.Text = ""
+ sv.RichText = sv.RichText[:0]
+}
+
+// Sentinel error to indicate that no shared strings file can be found
+var errNoSharedStrings = errors.New("no shared strings file exists")
+
+// getSharedStringsFile attempts to find and return the zip.File struct associated with the
+// shared strings section of an xlsx file. An error is returned if no sharedStrings
+// file can be found.
+func getSharedStringsFile(files []*zip.File) (*zip.File, error) {
+ for _, file := range files {
+ if file.Name == "xl/sharedStrings.xml" || file.Name == "xl/SharedStrings.xml" {
+ return file, nil
+ }
+ }
+
+ return nil, errNoSharedStrings
+}
+
+// getSharedStrings loads the contents of the shared string file into memory.
+// This serves as a large lookup table of values, so we can efficiently parse rows.
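+// Cells elsewhere in the workbook refer to these values by index; illustratively
+// (not shown in this change), a cell of type "s" holding the value "2" resolves to
+// sharedStrings[2].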
+func getSharedStrings(files []*zip.File) ([]string, error) {
+ ssFile, err := getSharedStringsFile(files)
+ if err != nil && errors.Is(err, errNoSharedStrings) {
+ // Valid to contain no shared strings
+ return []string{}, nil
+ }
+ if err != nil {
+ return nil, fmt.Errorf("unable to get shared strings file: %w", err)
+ }
+
+ f, err := ssFile.Open()
+ if err != nil {
+ return nil, fmt.Errorf("unable to open shared strings file: %w", err)
+ }
+
+ defer f.Close()
+
+ var (
+ sharedStrings []string
+ value sharedStringsValue
+ )
+
+ dec := xml.NewDecoder(f)
+ for {
+ token, err := dec.Token()
+ if err == io.EOF {
+ return sharedStrings, nil
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error decoding token: %w", err)
+ }
+
+ startElement, ok := token.(xml.StartElement)
+ if !ok {
+ continue
+ }
+
+ if sharedStrings == nil { // nil check, not len() == 0: the first start element is the root tag, which only sets the capacity
+ sharedStrings = makeSharedStringsSlice(startElement)
+ continue
+ }
+
+ value.Reset()
+ if err := value.unmarshalXML(dec, startElement); err != nil {
+ return nil, fmt.Errorf("error unmarshaling shared strings value %+v: %w", startElement, err)
+ }
+
+ sharedStrings = append(sharedStrings, value.String())
+ }
+}
+
+// makeSharedStringsSlice allocates the shared strings slice according to the 'count'
+// attribute of the root tag. If the attribute is absent, count stays 0 and make(..., 0) is still valid.
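+// An illustrative (assumed) root element: <sst xmlns="..." count="3" uniqueCount="3">.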
+func makeSharedStringsSlice(rootElem xml.StartElement) []string {
+ var count int
+ for _, attr := range rootElem.Attr {
+ if attr.Name.Local != "count" {
+ continue
+ }
+
+ var err error
+
+ count, err = strconv.Atoi(attr.Value)
+ if err != nil {
+ return []string{}
+ }
+ }
+
+ return make([]string, 0, count)
+}
diff --git a/godo/office/xlsx/sheets.go b/godo/office/xlsx/sheets.go
new file mode 100644
index 0000000..10f7b02
--- /dev/null
+++ b/godo/office/xlsx/sheets.go
@@ -0,0 +1,98 @@
+package xlsx
+
+import (
+ "archive/zip"
+ "encoding/xml"
+ "fmt"
+ "strings"
+)
+
+// workbook is a struct representing the data we care about from the workbook.xml file.
+type workbook struct {
+ Sheets []sheet `xml:"sheets>sheet"`
+}
+
+// sheet is a struct representing the sheet xml element.
+type sheet struct {
+ Name string `xml:"name,attr,omitempty"`
+ RelationshipID string `xml:"http://schemas.openxmlformats.org/officeDocument/2006/relationships id,attr,omitempty"`
+}
+
+// relationships is a struct representing the data we care about from the _rels/workbook.xml.rels file.
+type relationships struct {
+ Relationships []relationship `xml:"Relationship"`
+}
+
+type relationship struct {
+ ID string `xml:"Id,attr,omitempty"`
+ Target string `xml:"Target,attr,omitempty"`
+}
+
+func getFileNameFromRelationships(rels []relationship, s sheet) (string, error) {
+ for _, rel := range rels {
+ if rel.ID == s.RelationshipID {
+ if strings.HasPrefix(rel.Target, "/") {
+ // path is absolute, take all but the leading slash
+ return rel.Target[1:], nil
+ }
+ // path is relative, so needs xl/ adding
+ return "xl/" + rel.Target, nil
+ }
+ }
+ return "", fmt.Errorf("unable to find file with relationship %s", s.RelationshipID)
+}
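+
+// Illustrative examples (not part of the original change): a relative Target of
+// "worksheets/sheet1.xml" resolves to "xl/worksheets/sheet1.xml", while an absolute
+// Target of "/xl/worksheets/sheet1.xml" drops its leading slash and resolves to the
+// same path.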
+
+// getWorksheets loads the workbook.xml file and extracts a list of worksheets, along
+// with a map from each canonical worksheet name to its *zip.File.
+// This will return an error if it is not possible to read the workbook.xml file, or
+// if a worksheet without a file is referenced.
+func getWorksheets(files []*zip.File) ([]string, *map[string]*zip.File, error) {
+ wbFile, err := getFileForName(files, "xl/workbook.xml")
+ if err != nil {
+ return nil, nil, fmt.Errorf("unable to get workbook file: %w", err)
+ }
+ data, err := readFile(wbFile)
+ if err != nil {
+ return nil, nil, fmt.Errorf("unable to read workbook file: %w", err)
+ }
+
+ var wb workbook
+ err = xml.Unmarshal(data, &wb)
+ if err != nil {
+ return nil, nil, fmt.Errorf("unable to parse workbook file: %w", err)
+ }
+
+ relsFile, err := getFileForName(files, "xl/_rels/workbook.xml.rels")
+ if err != nil {
+ return nil, nil, fmt.Errorf("unable to get relationships file: %w", err)
+ }
+ relsData, err := readFile(relsFile)
+ if err != nil {
+ return nil, nil, fmt.Errorf("unable to read relationships file: %w", err)
+ }
+
+ rels := relationships{}
+ err = xml.Unmarshal(relsData, &rels)
+ if err != nil {
+ return nil, nil, fmt.Errorf("unable to parse relationships file: %w", err)
+ }
+
+ wsFileMap := map[string]*zip.File{}
+ sheetNames := make([]string, len(wb.Sheets))
+
+ for i, sheet := range wb.Sheets {
+ sheetFilename, err := getFileNameFromRelationships(rels.Relationships, sheet)
+ if err != nil {
+ return nil, nil, fmt.Errorf("unable to get file name from relationships: %w", err)
+ }
+ sheetFile, err := getFileForName(files, sheetFilename)
+ if err != nil {
+ return nil, nil, fmt.Errorf("unable to get file for sheet name %s: %w", sheetFilename, err)
+ }
+
+ wsFileMap[sheet.Name] = sheetFile
+ sheetNames[i] = sheet.Name
+ }
+
+ return sheetNames, &wsFileMap, nil
+}
diff --git a/godo/office/xlsx/styles.go b/godo/office/xlsx/styles.go
new file mode 100644
index 0000000..11479d1
--- /dev/null
+++ b/godo/office/xlsx/styles.go
@@ -0,0 +1,89 @@
+package xlsx
+
+import (
+ "archive/zip"
+ "encoding/xml"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// styleSheet defines a struct containing the information we care about from the styles.xml file.
+type styleSheet struct {
+ NumberFormats []numberFormat `xml:"numFmts>numFmt,omitempty"`
+ CellStyles []cellStyle `xml:"cellXfs>xf,omitempty"`
+}
+
+// numberFormat defines a struct containing the format strings for numerical styles.
+type numberFormat struct {
+ NumberFormatID int `xml:"numFmtId,attr,omitempty"`
+ FormatCode string `xml:"formatCode,attr,omitempty"`
+}
+
+// cellStyle defines a struct containing style information for a cell.
+type cellStyle struct {
+ NumberFormatID int `xml:"numFmtId,attr"`
+}
+
+// getFormatCode returns the format string for a given format ID.
+// If the format code is not found, it returns an empty string.
+func getFormatCode(ID int, numberFormats []numberFormat) string {
+ for _, nf := range numberFormats {
+ if nf.NumberFormatID == ID {
+ return nf.FormatCode
+ }
+ }
+
+ return ""
+}
+
+var formatGroup = regexp.MustCompile(`\[.+?\]|\\.|".*?"`)
+
+// isDateFormatCode determines whether a format code is for a date.
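+// Illustrative examples (not from the original source): "yyyy-mm-dd" and "d-mmm-yy"
+// are treated as date formats, while "0.00%" and [Red]"mm"0 are not, because
+// bracketed sections, escaped characters and quoted literals are stripped before
+// the check for date characters.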
+func isDateFormatCode(formatCode string) bool {
+ c := formatGroup.ReplaceAllString(formatCode, "")
+ return strings.ContainsAny(c, "dmhysDMHYS")
+}
+
+// getDateStylesFromStyleSheet builds a map of all date-related styles, keyed by
+// their style sheet index.
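+// The ranges reflect the SpreadsheetML defaults (an assumption, not stated in this
+// change): IDs 14-22 are built-in date/time formats, while IDs 164 and above are
+// custom formats and therefore need their format code resolved via numFmts.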
+func getDateStylesFromStyleSheet(ss *styleSheet) *map[int]bool {
+ dateStyles := map[int]bool{}
+
+ for i, style := range ss.CellStyles {
+ if 14 <= style.NumberFormatID && style.NumberFormatID <= 22 {
+ dateStyles[i] = true
+ }
+ if 164 <= style.NumberFormatID {
+ formatCode := getFormatCode(style.NumberFormatID, ss.NumberFormats)
+ if isDateFormatCode(formatCode) {
+ dateStyles[i] = true
+ }
+ }
+ }
+
+ return &dateStyles
+}
+
+// getDateFormatStyles reads the styles XML and returns a map of all styles that
+// relate to date fields.
+// If the styles sheet cannot be found or read, an error is returned.
+func getDateFormatStyles(files []*zip.File) (*map[int]bool, error) {
+ stylesFile, err := getFileForName(files, "xl/styles.xml")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get styles file: %w", err)
+ }
+
+ data, err := readFile(stylesFile)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read styles file: %w", err)
+ }
+
+ var ss styleSheet
+ err = xml.Unmarshal(data, &ss)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse styles file: %w", err)
+ }
+
+ return getDateStylesFromStyleSheet(&ss), nil
+}
diff --git a/godo/office/xlsx/xml.go b/godo/office/xlsx/xml.go
new file mode 100644
index 0000000..f173a3c
--- /dev/null
+++ b/godo/office/xlsx/xml.go
@@ -0,0 +1,21 @@
+package xlsx
+
+import (
+ "encoding/xml"
+ "fmt"
+)
+
+func getCharData(d *xml.Decoder) (string, error) {
+ tok, err := d.Token()
+ if err != nil {
+ return "", fmt.Errorf("unable to get raw token: %w", err)
+ }
+
+ cdata, ok := tok.(xml.CharData)
+ if !ok {
+ // Valid for no chardata to be present
+ return "", nil
+ }
+
+ return string(cdata), nil
+}
diff --git a/godo/ai/convert/xml.go b/godo/office/xml.go
similarity index 70%
rename from godo/ai/convert/xml.go
rename to godo/office/xml.go
index ea5c111..fca043a 100644
--- a/godo/ai/convert/xml.go
+++ b/godo/office/xml.go
@@ -1,32 +1,21 @@
-/*
- * GodoOS - A lightweight cloud desktop
- * Copyright (C) 2024 https://godoos.com
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program. If not, see .
- */
-package convert
+package office
import (
"bytes"
"encoding/xml"
"fmt"
+ "godo/office/etree"
"io"
+ "os"
)
-// ConvertXML converts an XML file to text.
+// xml2txt converts an XML file to text.
-func ConvertXML(r io.Reader) (string, error) {
- cleanXML, err := Tidy(r)
+func xml2txt(filePath string) (string, error) {
+ file, err := os.Open(filePath)
+ if err != nil {
+ return "", fmt.Errorf("error opening file: %v", err)
+ }
+ defer file.Close()
+ cleanXML, err := etree.Tidy(file)
if err != nil {
return "", fmt.Errorf("tidy error: %v", err)
}