Browse Source

change base office

master
godo 7 months ago
parent
commit
8fafaa071b
  1. 1
      frontend/components.d.ts
  2. 37
      godo/ai/convert/doc.go
  3. 148
      godo/ai/convert/doc/clx.go
  4. 258
      godo/ai/convert/doc/doc.go
  5. 174
      godo/ai/convert/doc/fib.go
  6. 234
      godo/ai/convert/doc/plcFld.go
  7. 53
      godo/ai/convert/doc/utf16.go
  8. 178
      godo/ai/convert/docx.go
  9. 336
      godo/ai/convert/find.go
  10. 381
      godo/ai/convert/http.go
  11. 45
      godo/ai/convert/image.go
  12. 153
      godo/ai/convert/img.go
  13. 127
      godo/ai/convert/libs/file.go
  14. 84
      godo/ai/convert/libs/kind.go
  15. 170
      godo/ai/convert/libs/rapid.go
  16. 136
      godo/ai/convert/libs/xpdf.go
  17. 113
      godo/ai/convert/main.go
  18. 114
      godo/ai/convert/main_test.go
  19. 42
      godo/ai/convert/md.go
  20. 39
      godo/ai/convert/pdf.go
  21. 89
      godo/ai/convert/pptx.go
  22. 521
      godo/ai/convert/rtf.go
  23. 31
      godo/ai/convert/txt.go
  24. 53
      godo/ai/convert/url.go
  25. 85
      godo/ai/convert/xlsx.go
  26. 5
      godo/ai/server/chat.go
  27. 4
      godo/ai/server/llms.go
  28. 5
      godo/cmd/main.go
  29. 27
      godo/deps/darwin.go
  30. BIN
      godo/deps/darwin/goconv/pdf/pdfimages
  31. BIN
      godo/deps/darwin/goconv/pdf/pdftohtml
  32. BIN
      godo/deps/darwin/goconv/pdf/pdftopng
  33. BIN
      godo/deps/darwin/goconv/rapid/RapidOcrOnnx
  34. BIN
      godo/deps/darwin/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx
  35. BIN
      godo/deps/darwin/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx
  36. BIN
      godo/deps/darwin/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx
  37. 6623
      godo/deps/darwin/goconv/rapid/models/ppocr_keys_v1.txt
  38. 101
      godo/deps/extract.go
  39. 27
      godo/deps/linux.go
  40. BIN
      godo/deps/linux/goconv/pdf/pdfimages
  41. BIN
      godo/deps/linux/goconv/pdf/pdftohtml
  42. BIN
      godo/deps/linux/goconv/pdf/pdftopng
  43. BIN
      godo/deps/linux/goconv/rapid/RapidOcrOnnx
  44. BIN
      godo/deps/linux/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx
  45. BIN
      godo/deps/linux/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx
  46. BIN
      godo/deps/linux/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx
  47. 6623
      godo/deps/linux/goconv/rapid/models/ppocr_keys_v1.txt
  48. 27
      godo/deps/windows.go
  49. BIN
      godo/deps/windows/goconv/pdf/pdfimages.exe
  50. BIN
      godo/deps/windows/goconv/pdf/pdftohtml.exe
  51. BIN
      godo/deps/windows/goconv/pdf/pdftopng.exe
  52. BIN
      godo/deps/windows/goconv/rapid/RapidOcrOnnx.exe
  53. BIN
      godo/deps/windows/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx
  54. BIN
      godo/deps/windows/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx
  55. BIN
      godo/deps/windows/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx
  56. 6623
      godo/deps/windows/goconv/rapid/models/ppocr_keys_v1.txt
  57. 201
      godo/office/LICENSE
  58. 75
      godo/office/Readme.md
  59. 27
      godo/office/darwin.go
  60. 545
      godo/office/doc.go
  61. 412
      godo/office/docx.go
  62. 31
      godo/office/epub.go
  63. 24
      godo/office/etree/LICENSE
  64. 205
      godo/office/etree/README.md
  65. 1810
      godo/office/etree/etree.go
  66. 394
      godo/office/etree/helpers.go
  67. 595
      godo/office/etree/path.go
  68. 10
      godo/office/etree/tidy.go
  69. 30
      godo/office/html.go
  70. 64
      godo/office/json.go
  71. 27
      godo/office/linux.go
  72. 48
      godo/office/md.go
  73. 36
      godo/office/odt.go
  74. 321
      godo/office/office.go
  75. 201
      godo/office/ole2/LICENSE
  76. 2
      godo/office/ole2/README.md
  77. 35
      godo/office/ole2/dir.go
  78. 42
      godo/office/ole2/header.go
  79. 156
      godo/office/ole2/ole.go
  80. 19
      godo/office/ole2/pss.go
  81. 37
      godo/office/ole2/sector.go
  82. 13
      godo/office/ole2/stream.go
  83. 96
      godo/office/ole2/stream_reader.go
  84. 75
      godo/office/ole2/stream_reader_test.go
  85. 138
      godo/office/pdf/README.md
  86. 49
      godo/office/pdf/ascii85.go
  87. 522
      godo/office/pdf/lex.go
  88. 4286
      godo/office/pdf/name.go
  89. 1047
      godo/office/pdf/page.go
  90. 134
      godo/office/pdf/ps.go
  91. 1111
      godo/office/pdf/read.go
  92. 154
      godo/office/pdf/text.go
  93. 358
      godo/office/ppt.go
  94. 208
      godo/office/pptx.go
  95. 164
      godo/office/reader.go
  96. 362
      godo/office/rtf.go
  97. 41
      godo/office/txt.go
  98. 46
      godo/office/types.go
  99. 27
      godo/office/windows.go
  100. 125
      godo/office/xls.go

1
frontend/components.d.ts

@@ -100,6 +100,7 @@ declare module 'vue' {
ElRow: typeof import('element-plus/es')['ElRow']
ElScrollbar: typeof import('element-plus/es')['ElScrollbar']
ElSelect: typeof import('element-plus/es')['ElSelect']
ElSelectV2: typeof import('element-plus/es')['ElSelectV2']
ElSlider: typeof import('element-plus/es')['ElSlider']
ElSpace: typeof import('element-plus/es')['ElSpace']
ElSwitch: typeof import('element-plus/es')['ElSwitch']

37
godo/ai/convert/doc.go

@@ -1,37 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
"bytes"
"io"
"godo/ai/convert/doc"
)
// ConvertDoc converts an MS Word .doc to text.
//
// It delegates binary parsing to doc.ParseDoc and drains the returned
// reader into a string. The previous version asserted the reader to
// *bytes.Buffer unconditionally, which panics if doc.ParseDoc ever
// returns any other io.Reader implementation; the fallback below
// drains such readers instead.
func ConvertDoc(r io.Reader) (string, error) {
	buf, err := doc.ParseDoc(r)
	if err != nil {
		return "", err
	}
	// Fast path: ParseDoc currently returns a *bytes.Buffer.
	if bb, ok := buf.(*bytes.Buffer); ok {
		return bb.String(), nil
	}
	// Fallback: drain any other reader instead of panicking.
	b, err := io.ReadAll(buf)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

148
godo/ai/convert/doc/clx.go

@ -1,148 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package doc
import (
"encoding/binary"
"errors"
"github.com/richardlehane/mscfb"
)
var (
	errInvalidPrc  = errors.New("Invalid Prc structure")
	errInvalidClx  = errors.New("expected last aCP value to equal fib.cpLength (2.8.35)")
	errInvalidPcdt = errors.New("expected clxt to be equal 0x02")
)

// clx models the Clx structure (MS-DOC section 2.9.38): the piece
// table container stored in the table stream.
type clx struct {
	pcdt pcdt
}

// pcdt is the Pcdt structure (section 2.9.178): its size plus the
// embedded PlcPcd piece table.
type pcdt struct {
	lcb    int
	PlcPcd plcPcd
}

// plcPcd pairs n+1 character positions (aCP) with n piece descriptors
// (aPcd); aCP[i]..aCP[i+1] is the CP range covered by aPcd[i].
type plcPcd struct {
	aCP  []int
	aPcd []pcd
}

// pcd is a piece descriptor (section 2.9.177); only the fc field is
// kept by this parser.
type pcd struct {
	fc fcCompressed
}

// fcCompressed holds a file offset into the WordDocument stream plus a
// flag saying whether the text there is 8-bit "compressed" (ANSI) or
// UTF-16LE.
type fcCompressed struct {
	fc          int
	fCompressed bool
}
// getClx reads and parses the Clx (section 2.9.38) from the table
// stream, using the offset/size recorded in the FIB.
//
// Returns errInvalidArgument when either input is nil, and
// errInvalidClx when the final aCP does not equal fib.cpLength — the
// piece-table consistency rule from section 2.8.35.
func getClx(table *mscfb.File, fib *fib) (*clx, error) {
	if table == nil || fib == nil {
		return nil, errInvalidArgument
	}
	b, err := readClx(table, fib)
	if err != nil {
		return nil, err
	}
	// Skip the leading RgPrc array to locate the Pcdt within the Clx.
	pcdtOffset, err := getPrcArrayEnd(b)
	if err != nil {
		return nil, err
	}
	pcdt, err := getPcdt(b, pcdtOffset)
	if err != nil {
		return nil, err
	}
	// aCP always holds at least one element (getPcdt builds numPcds+1
	// entries), so this index is safe.
	if pcdt.PlcPcd.aCP[len(pcdt.PlcPcd.aCP)-1] != fib.fibRgLw.cpLength {
		return nil, errInvalidClx
	}
	return &clx{pcdt: *pcdt}, nil
}
// readClx reads the raw Clx bytes from the table stream at the
// offset/length pair (fcClx/lcbClx) recorded in FibRgFcLcb.
func readClx(table *mscfb.File, fib *fib) ([]byte, error) {
	b := make([]byte, fib.fibRgFcLcb.lcbClx)
	_, err := table.ReadAt(b, int64(fib.fibRgFcLcb.fcClx))
	if err != nil {
		return nil, err
	}
	return b, nil
}
// getPcdt parses the Pcdt from the Clx (section 2.9.178).
//
// Layout: a clxt byte (must be 0x02), a 4-byte little-endian lcb, then
// a PlcPcd holding numPcds+1 CPs followed by numPcds 8-byte Pcds.
// The original indexed the buffer without any bounds checks and could
// panic on a truncated or malformed Clx; every read is now validated
// up front and reported as errInvalidPcdt instead.
func getPcdt(clx []byte, pcdtOffset int) (*pcdt, error) {
	const pcdSize = 8
	if pcdtOffset < 0 || pcdtOffset+5 > len(clx) { // need clxt + lcb
		return nil, errInvalidPcdt
	}
	if clx[pcdtOffset] != 0x02 { // clxt must be 0x02 or invalid
		return nil, errInvalidPcdt
	}
	lcb := int(binary.LittleEndian.Uint32(clx[pcdtOffset+1 : pcdtOffset+5])) // skip clxt, get lcb
	plcPcdOffset := pcdtOffset + 5                                           // skip clxt and lcb
	numPcds := (lcb - 4) / (4 + pcdSize)                                     // see 2.2.2 in the spec for equation
	if numPcds < 0 {
		return nil, errInvalidPcdt
	}
	numCps := numPcds + 1 // always 1 more cp than pcds
	// The whole PlcPcd (CP array + Pcd array) must fit in the buffer.
	if plcPcdOffset+4*numCps+pcdSize*numPcds > len(clx) {
		return nil, errInvalidPcdt
	}
	cps := make([]int, numCps)
	for i := 0; i < numCps; i++ {
		cpOffset := plcPcdOffset + i*4
		cps[i] = int(binary.LittleEndian.Uint32(clx[cpOffset : cpOffset+4]))
	}
	pcdStart := plcPcdOffset + 4*numCps
	pcds := make([]pcd, numPcds)
	for i := 0; i < numPcds; i++ {
		pcdOffset := pcdStart + i*pcdSize
		pcds[i] = *parsePcd(clx[pcdOffset : pcdOffset+pcdSize])
	}
	return &pcdt{lcb: lcb, PlcPcd: plcPcd{aCP: cps, aPcd: pcds}}, nil
}
// getPrcArrayEnd walks the leading RgPrc array of the Clx (section
// 2.9.38) and returns the offset of the first byte after it — i.e. the
// offset of the Pcdt.
//
// Each Prc is: clxt byte 0x01, a 2-byte cbGrpprl, then cbGrpprl bytes
// of grpprl data; the walk stops at the first byte that is not 0x01.
// The original checked bounds only *after* reading clx[prcOffset] and
// could panic on a truncated buffer; bounds are now verified before
// every read. A zero cbGrpprl or an overlong array yields
// errInvalidPrc, as before.
func getPrcArrayEnd(clx []byte) (int, error) {
	prcOffset := 0
	for count := 0; count <= 10000; count++ { // hard cap guards against corrupt cycles
		if prcOffset >= len(clx) {
			return 0, errInvalidPrc
		}
		if clx[prcOffset] != 0x01 { // not a Prc, so the array ends here
			return prcOffset, nil
		}
		if prcOffset+3 > len(clx) { // need clxt plus the 2-byte cbGrpprl
			return 0, errInvalidPrc
		}
		cbGrpprl := binary.LittleEndian.Uint16(clx[prcOffset+1 : prcOffset+3])
		if cbGrpprl == 0 { // a Prc must carry data; zero would stall the walk
			return 0, errInvalidPrc
		}
		prcOffset += 1 + 2 + int(cbGrpprl) // skip clxt, cbGrpprl, and GrpPrl
	}
	return 0, errInvalidPrc
}
// parsePcd parses a Pcd (section 2.9.177) from its fixed 8-byte
// encoding; only bytes 2..5 (the FcCompressed field) are used.
func parsePcd(pcdData []byte) *pcd {
	return &pcd{fc: *parseFcCompressed(pcdData[2:6])}
}
// parseFcCompressed parses an FcCompressed (section 2.9.73) from its
// 4-byte little-endian encoding (little endian per spec section 1.3.7).
//
// Bit 6 of the last byte is the fCompressed flag; the remaining low
// bits form the fc offset. The original cleared the flag by writing
// back into fcData, silently mutating the caller's buffer; this
// version masks a local copy of the decoded value and leaves the
// input untouched.
func parseFcCompressed(fcData []byte) *fcCompressed {
	fCompressed := fcData[3]&64 == 64 // second-highest bit of the last byte
	raw := binary.LittleEndian.Uint32(fcData)
	fc := raw & 0x3FFFFFFF // drop the fCompressed/flag bits from the offset
	return &fcCompressed{fc: int(fc), fCompressed: fCompressed}
}

258
godo/ai/convert/doc/doc.go

@ -1,258 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package doc
import (
"bytes"
"encoding/binary"
"errors"
"io"
"unicode/utf16"
"github.com/mattetti/filebuffer"
"github.com/richardlehane/mscfb"
)
var (
	errTable           = errors.New("cannot find table stream")
	errDocEmpty        = errors.New("WordDocument not found")
	errDocShort        = errors.New("wordDoc block too short")
	errInvalidArgument = errors.New("invalid table and/or fib")
)

// allReader is the set of reader capabilities the .doc parser needs
// from an in-memory buffer: random access, seeking and closing.
type allReader interface {
	io.Closer
	io.ReaderAt
	io.ReadSeeker
}

// wrapError prefixes parser errors with a common message for callers.
func wrapError(e error) error {
	return errors.New("Error processing file: " + e.Error())
}
// ParseDoc converts a standard io.Reader from a Microsoft Word
// .doc binary file and returns a reader (actually a bytes.Buffer)
// which will output the plain text found in the .doc file.
//
// BUG FIX: the original declared a new ra with ":=" inside the
// type-assertion fallback, shadowing the outer ra; whenever the input
// was not already an io.ReaderAt, mscfb.New was then called with a nil
// reader. The buffered reader is now assigned to the outer variable.
func ParseDoc(r io.Reader) (io.Reader, error) {
	ra, ok := r.(io.ReaderAt)
	if !ok {
		mem, _, err := toMemoryBuffer(r)
		if err != nil {
			return nil, wrapError(err)
		}
		defer mem.Close()
		ra = mem
	}
	d, err := mscfb.New(ra)
	if err != nil {
		return nil, wrapError(err)
	}
	wordDoc, table0, table1 := getWordDocAndTables(d)
	fib, err := getFib(wordDoc)
	if err != nil {
		return nil, wrapError(err)
	}
	table := getActiveTable(table0, table1, fib)
	if table == nil {
		return nil, wrapError(errTable)
	}
	clx, err := getClx(table, fib)
	if err != nil {
		return nil, wrapError(err)
	}
	return getText(wordDoc, clx)
}
// toMemoryBuffer drains r fully into memory and wraps the bytes in a
// filebuffer, giving the caller random access (allReader) plus the
// total number of bytes read.
func toMemoryBuffer(r io.Reader) (allReader, int64, error) {
	var buf bytes.Buffer
	n, err := buf.ReadFrom(r)
	if err != nil {
		return nil, 0, err
	}
	return filebuffer.New(buf.Bytes()), n, nil
}
// getText walks the piece table and extracts the document text from
// the WordDocument stream, decoding it to UTF-8.
//
// Piece i covers CPs aCP[i]..aCP[i+1]. "Compressed" pieces store one
// byte per character (widened by translateText); uncompressed pieces
// store UTF-16LE, two bytes per character. After decoding, the table
// cell separator (0x07) becomes a space and control characters other
// than tab/LF/CR are dropped.
func getText(wordDoc *mscfb.File, clx *clx) (io.Reader, error) {
	//var buf bytes.Buffer
	var buf utf16Buffer
	for i := 0; i < len(clx.pcdt.PlcPcd.aPcd); i++ {
		pcd := clx.pcdt.PlcPcd.aPcd[i]
		cp := clx.pcdt.PlcPcd.aCP[i]
		cpNext := clx.pcdt.PlcPcd.aCP[i+1]
		//var start, end, size int
		var start, end int
		if pcd.fc.fCompressed {
			//size = 1
			// Compressed: byte offset is fc/2 and one byte covers one CP
			// (section 2.9.73).
			start = pcd.fc.fc / 2
			end = start + cpNext - cp
		} else {
			//size = 2
			// UTF-16LE: fc is already a byte offset, two bytes per CP.
			start = pcd.fc.fc
			end = start + 2*(cpNext-cp)
		}
		b := make([]byte, end-start)
		//_, err := wordDoc.ReadAt(b, int64(start/size)) // read all the characters
		_, err := wordDoc.ReadAt(b, int64(start))
		if err != nil {
			return nil, err
		}
		translateText(b, &buf, pcd.fc.fCompressed)
	}
	//return &buf, nil
	runes := utf16.Decode(buf.Chars())
	var out bytes.Buffer
	out.Grow(len(runes))
	for _, r := range runes {
		if r == 7 { // table column separator
			r = ' '
		} else if r < 32 && r != 9 && r != 10 && r != 13 { // skip non-printable ASCII characters
			continue
		}
		out.WriteRune(r)
	}
	return &out, nil
}
// translateText appends the characters of one piece to buf, dropping
// field instructions.
//
// 0x13/0x14/0x15 are the field begin/separator/end characters from
// section 2.8.25: bytes between a field begin and the next separator
// or end (the field instruction text) are skipped. Compressed bytes
// are widened to UTF-16 via replaceCompressed; otherwise the raw
// UTF-16LE bytes are copied through unchanged.
//
// NOTE(review): fieldLevel is incremented on 0x13 but never read or
// decremented, so nested fields are not tracked — the first 0x14/0x15
// re-enables output regardless of nesting depth.
func translateText(b []byte, buf *utf16Buffer, fCompressed bool) {
	fieldLevel := 0
	var isFieldChar bool
	for cIndex := range b {
		// Handle special field characters (section 2.8.25)
		if b[cIndex] == 0x13 {
			isFieldChar = true
			fieldLevel++
			continue
		} else if b[cIndex] == 0x14 {
			isFieldChar = false
			continue
		} else if b[cIndex] == 0x15 {
			isFieldChar = false
			continue
		} else if isFieldChar {
			continue
		}
		// if b[cIndex] == 7 { // table column separator
		// 	buf.WriteByte(' ')
		// 	continue
		// } else if b[cIndex] < 32 && b[cIndex] != 9 && b[cIndex] != 10 && b[cIndex] != 13 { // skip non-printable ASCII characters
		// 	//buf.Write([]byte(fmt.Sprintf("|%#x|", b[cIndex])))
		// 	continue
		// }
		if fCompressed { // compressed, so replace compressed characters
			buf.Write(replaceCompressed(b[cIndex]))
		} else {
			//buf.Write(b)
			buf.WriteByte(b[cIndex])
		}
	}
}
// replaceCompressed converts one byte of "compressed" (ANSI) text into
// its 2-byte little-endian UTF-16 representation. The bytes in the
// table below map onto non-trivial Unicode code points; every other
// byte passes through with a zero high byte.
func replaceCompressed(char byte) []byte {
	special := map[byte]uint16{
		0x82: 0x201A, 0x83: 0x0192, 0x84: 0x201E, 0x85: 0x2026,
		0x86: 0x2020, 0x87: 0x2021, 0x88: 0x02C6, 0x89: 0x2030,
		0x8A: 0x0160, 0x8B: 0x2039, 0x8C: 0x0152, 0x91: 0x2018,
		0x92: 0x2019, 0x93: 0x201C, 0x94: 0x201D, 0x95: 0x2022,
		0x96: 0x2013, 0x97: 0x2014, 0x98: 0x02DC, 0x99: 0x2122,
		0x9A: 0x0161, 0x9B: 0x203A, 0x9C: 0x0153, 0x9F: 0x0178,
	}
	v, ok := special[char]
	if !ok {
		return []byte{char, 0x00}
	}
	out := make([]byte, 2)
	binary.LittleEndian.PutUint16(out, v)
	return out
}
// getWordDocAndTables scans the compound-file directory for the
// WordDocument stream and the two possible table streams (0Table and
// 1Table). Any of the three results may be nil when absent.
func getWordDocAndTables(r *mscfb.Reader) (*mscfb.File, *mscfb.File, *mscfb.File) {
	var wordDoc, table0, table1 *mscfb.File
	for _, stream := range r.File {
		switch stream.Name {
		case "WordDocument":
			wordDoc = stream
		case "0Table":
			table0 = stream
		case "1Table":
			table1 = stream
		}
	}
	return wordDoc, table0, table1
}
// getActiveTable picks the table stream named by fibBase.fWhichTblStm:
// 0 selects 0Table, anything else selects 1Table.
func getActiveTable(table0 *mscfb.File, table1 *mscfb.File, f *fib) *mscfb.File {
	if f.base.fWhichTblStm != 0 {
		return table1
	}
	return table0
}

174
godo/ai/convert/doc/fib.go

@ -1,174 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package doc
import (
"encoding/binary"
"errors"
"github.com/richardlehane/mscfb"
)
var (
	errFibInvalid = errors.New("file information block validation failed")
)

// fib is the parsed File Information Block (section 2.5.1): the
// variable-length header at offset 0 of the WordDocument stream.
// csw, cslw and cbRgFcLcb record the byte sizes of the variable parts.
type fib struct {
	base       fibBase
	csw        int
	fibRgW     fibRgW
	cslw       int
	fibRgLw    fibRgLw
	cbRgFcLcb  int
	fibRgFcLcb fibRgFcLcb
}

// fibBase holds the only FibBase field this parser needs: which of the
// two table streams (0Table/1Table) is current.
type fibBase struct {
	fWhichTblStm int
}

// fibRgW (section 2.5.3) is parsed only for its size; no fields kept.
type fibRgW struct {
}

// fibRgLw (section 2.5.4) carries the character counts (ccp*) of the
// document parts, plus the derived total cpLength used later to
// validate the piece table.
type fibRgLw struct {
	ccpText    int
	ccpFtn     int
	ccpHdd     int
	ccpMcr     int
	ccpAtn     int
	ccpEdn     int
	ccpTxbx    int
	ccpHdrTxbx int
	cpLength   int
}

// fibRgFcLcb (section 2.5.5) holds the offset/length pairs (fc/lcb)
// into the table stream that this parser uses — the field PLCs and
// the Clx.
type fibRgFcLcb struct {
	fcPlcfFldMom  int
	lcbPlcfFldMom int
	fcPlcfFldHdr  int
	lcbPlcfFldHdr int
	fcPlcfFldFtn  int
	lcbPlcfFldFtn int
	fcPlcfFldAtn  int
	lcbPlcfFldAtn int
	fcClx         int
	lcbClx        int
}
// getFib reads and parses the File Information Block (section 2.5.1)
// from the start of the WordDocument stream.
//
// A fixed 898-byte prefix is read — large enough to cover FibBase
// through FibRgFcLcb97 — and the variable-length sub-blocks are then
// located via the size words (csw, cslw, cbRgFcLcb) that precede them.
func getFib(wordDoc *mscfb.File) (*fib, error) {
	if wordDoc == nil {
		return nil, errDocEmpty
	}
	b := make([]byte, 898) // get FIB block up to FibRgFcLcb97
	_, err := wordDoc.ReadAt(b, 0)
	if err != nil {
		return nil, err
	}
	fibBase := getFibBase(b[0:32])
	fibRgW, csw, err := getFibRgW(b, 32)
	if err != nil {
		return nil, err
	}
	// 34 = end of FibBase (32) plus the 2-byte csw word itself.
	fibRgLw, cslw, err := getFibRgLw(b, 34+csw)
	if err != nil {
		return nil, err
	}
	// The extra +2 skips the cslw word that precedes FibRgLw.
	fibRgFcLcb, cbRgFcLcb, err := getFibRgFcLcb(b, 34+csw+2+cslw)
	return &fib{base: *fibBase, csw: csw, cslw: cslw, fibRgW: *fibRgW, fibRgLw: *fibRgLw, fibRgFcLcb: *fibRgFcLcb, cbRgFcLcb: cbRgFcLcb}, err
}
// getFibBase parses FibBase (section 2.5.2). Only fWhichTblStm — the
// bit saying whether 0Table or 1Table is the live table stream — is
// extracted; it is the second-lowest bit of byte 11.
func getFibBase(fib []byte) *fibBase {
	whichTable := (fib[11] >> 1) & 1
	return &fibBase{fWhichTblStm: int(whichTable)}
}
// getFibRgW reads the csw size word at start and returns the byte size
// of the FibRgW block (csw counts 16-bit values, hence *2). None of
// FibRgW's fields are needed, so an empty struct is returned.
//
// The bound was previously start+2 >= len(fib), which wrongly rejected
// a buffer ending exactly after the size word; fib[start:start+2] only
// requires start+2 <= len(fib).
func getFibRgW(fib []byte, start int) (*fibRgW, int, error) {
	if start+2 > len(fib) { // must be big enough for csw
		return &fibRgW{}, 0, errFibInvalid
	}
	csw := int(binary.LittleEndian.Uint16(fib[start:start+2])) * 2 // in bytes
	return &fibRgW{}, csw, nil
}
// getFibRgLw parses FibRgLw (section 2.5.4): the 32-bit character
// counts (ccp*) of each document part, located 2 bytes past `start`
// (skipping the cslw size word, whose byte size is also returned).
//
// cpLength is derived per section 2.8.35: when any non-main part has
// text, it is the sum of all parts plus one; otherwise it is just
// ccpText. It is later checked against the last aCP of the piece table.
func getFibRgLw(fib []byte, start int) (*fibRgLw, int, error) {
	fibRgLwStart := start + 2 // skip cslw
	if fibRgLwStart+88 >= len(fib) { // expect 88 bytes in fibRgLw
		return &fibRgLw{}, 0, errFibInvalid
	}
	cslw := getInt16(fib, start) * 4 // in bytes
	// Field offsets below are 4-byte slots counted from the block start.
	ccpText := getInt(fib, fibRgLwStart+3*4)
	ccpFtn := getInt(fib, fibRgLwStart+4*4)
	ccpHdd := getInt(fib, fibRgLwStart+5*4)
	ccpMcr := getInt(fib, fibRgLwStart+6*4)
	ccpAtn := getInt(fib, fibRgLwStart+7*4)
	ccpEdn := getInt(fib, fibRgLwStart+8*4)
	ccpTxbx := getInt(fib, fibRgLwStart+9*4)
	ccpHdrTxbx := getInt(fib, fibRgLwStart+10*4)
	// calculate cpLength. Used in PlcPcd verification (see section 2.8.35)
	var cpLength int
	if ccpFtn != 0 || ccpHdd != 0 || ccpMcr != 0 || ccpAtn != 0 || ccpEdn != 0 || ccpTxbx != 0 || ccpHdrTxbx != 0 {
		cpLength = ccpFtn + ccpHdd + ccpMcr + ccpAtn + ccpEdn + ccpTxbx + ccpHdrTxbx + ccpText + 1
	} else {
		cpLength = ccpText
	}
	return &fibRgLw{ccpText: ccpText, ccpFtn: ccpFtn, ccpHdd: ccpHdd, ccpMcr: ccpMcr, ccpAtn: ccpAtn,
		ccpEdn: ccpEdn, ccpTxbx: ccpTxbx, ccpHdrTxbx: ccpHdrTxbx, cpLength: cpLength}, cslw, nil
}
// getFibRgFcLcb parses FibRgFcLcb (section 2.5.5): the fc/lcb
// offset/length pairs located 2 bytes past `start` (skipping the
// cbRgFcLcb size word). Only the field-PLC pairs and the Clx pair are
// kept.
//
// BUG FIX: the length guard was inverted — it returned errFibInvalid
// when fibRgFcLcbStart+186*4 was *less* than len(fib), i.e. exactly
// when there was room to spare, while accepting truncated buffers.
// The guard now rejects buffers too short for the values read below
// (the highest slot used is lcbClx at offset 67*4).
func getFibRgFcLcb(fib []byte, start int) (*fibRgFcLcb, int, error) {
	fibRgFcLcbStart := start + 2 // skip cbRgFcLcb
	if fibRgFcLcbStart+68*4 > len(fib) { // need slots up to and including 67 (lcbClx)
		return &fibRgFcLcb{}, 0, errFibInvalid
	}
	cbRgFcLcb := getInt16(fib, start)
	fcPlcfFldMom := getInt(fib, fibRgFcLcbStart+32*4)
	lcbPlcfFldMom := getInt(fib, fibRgFcLcbStart+33*4)
	fcPlcfFldHdr := getInt(fib, fibRgFcLcbStart+34*4)
	lcbPlcfFldHdr := getInt(fib, fibRgFcLcbStart+35*4)
	fcPlcfFldFtn := getInt(fib, fibRgFcLcbStart+36*4)
	lcbPlcfFldFtn := getInt(fib, fibRgFcLcbStart+37*4)
	fcPlcfFldAtn := getInt(fib, fibRgFcLcbStart+38*4)
	lcbPlcfFldAtn := getInt(fib, fibRgFcLcbStart+39*4)
	fcClx := getInt(fib, fibRgFcLcbStart+66*4)
	lcbClx := getInt(fib, fibRgFcLcbStart+67*4)
	return &fibRgFcLcb{fcPlcfFldMom: fcPlcfFldMom, lcbPlcfFldMom: lcbPlcfFldMom, fcPlcfFldHdr: fcPlcfFldHdr, lcbPlcfFldHdr: lcbPlcfFldHdr,
		fcPlcfFldFtn: fcPlcfFldFtn, lcbPlcfFldFtn: lcbPlcfFldFtn, fcPlcfFldAtn: fcPlcfFldAtn, lcbPlcfFldAtn: lcbPlcfFldAtn,
		fcClx: fcClx, lcbClx: lcbClx}, cbRgFcLcb, nil
}
// getInt16 decodes the little-endian uint16 at buf[start] as an int.
func getInt16(buf []byte, start int) int {
	v := binary.LittleEndian.Uint16(buf[start : start+2])
	return int(v)
}
// getInt decodes the little-endian uint32 at buf[start] as an int.
func getInt(buf []byte, start int) int {
	v := binary.LittleEndian.Uint32(buf[start : start+4])
	return int(v)
}

234
godo/ai/convert/doc/plcFld.go

@ -1,234 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package doc
/* I don't think I'm going to need this
type plcFld struct {
aCp []int
aFld []fld
}
type fld struct {
fldch int
grffld int
fieldtype string
fNested bool
fHasSep bool
}
func getPlcFld(table *mscfb.File, offset, size int) (*plcFld, error) {
if table == nil {
return nil, errInvalidArgument
}
b := make([]byte, size)
_, err := table.ReadAt(b, int64(offset))
if err != nil {
return nil, err
}
f, err := getFld(b)
if err != nil {
return nil, err
}
return f, nil
}
func getFld(plc []byte) (*plcFld, error) {
return nil, nil
}
func getFieldType(grffld byte) string {
switch grffld {
case 0x01:
return "Not Named"
case 0x02:
return "Not Named"
case 0x03:
return "REF"
case 0x05:
return "FTNREF"
case 0x06:
return "SET"
case 0x07:
return "IF"
case 0x08:
return "INDEX"
case 0x0A:
return "STYLEREF"
case 0x0C:
return "SEQ"
case 0x0D:
return "TOC"
case 0x0E:
return "INFO"
case 0x0F:
return "TITLE"
case 0x10:
return "SUBJECT"
case 0x11:
return "AUTHOR"
case 0x12:
return "KEYWORDS"
case 0x13:
return "COMMENTS"
case 0x14:
return "LASTSAVEDBY"
case 0x15:
return "CREATEDATE"
case 0x16:
return "SAVEDATE"
case 0x17:
return "PRINTDATE"
case 0x18:
return "REVNUM"
case 0x19:
return "EDITTIME"
case 0x1A:
return "NUMPAGES"
case 0x1B:
return "NUMWORDS"
case 0x1C:
return "NUMCHARS"
case 0x1D:
return "FILENAME"
case 0x1E:
return "TEMPLATE"
case 0x1F:
return "DATE"
case 0x20:
return "TIME"
case 0x21:
return "PAGE"
case 0x22:
return "="
case 0x23:
return "QUOTE"
case 0x24:
return "INCLUDE"
case 0x25:
return "PAGEREF"
case 0x26:
return "ASK"
case 0x27:
return "FILLIN"
case 0x28:
return "DATA"
case 0x29:
return "NEXT"
case 0x2A:
return "NEXTIF"
case 0x2B:
return "SKIPIF"
case 0x2C:
return "MERGEREC"
case 0x2D:
return "DDE"
case 0x2E:
return "DDEAUTO"
case 0x2F:
return "GLOSSARY"
case 0x30:
return "PRINT"
case 0x31:
return "EQ"
case 0x32:
return "GOTOBUTTON"
case 0x33:
return "MACROBUTTON"
case 0x34:
return "AUTONUMOUT"
case 0x35:
return "AUTONUMLGL"
case 0x36:
return "AUTONUM"
case 0x37:
return "IMPORT"
case 0x38:
return "LINK"
case 0x39:
return "SYMBOL"
case 0x3A:
return "EMBED"
case 0x3B:
return "MERGEFIELD"
case 0x3C:
return "USERNAME"
case 0x3D:
return "USERINITIALS"
case 0x3E:
return "USERADDRESS"
case 0x3F:
return "BARCODE"
case 0x40:
return "DOCVARIABLE"
case 0x41:
return "SECTION"
case 0x42:
return "SECTIONPAGES"
case 0x43:
return "INCLUDEPICTURE"
case 0x44:
return "INCLUDETEXT"
case 0x45:
return "FILESIZE"
case 0x46:
return "FORMTEXT"
case 0x47:
return "FORMCHECKBOX"
case 0x48:
return "NOTEREF"
case 0x49:
return "TOA"
case 0x4B:
return "MERGESEQ"
case 0x4F:
return "AUTOTEXT"
case 0x50:
return "COMPARE"
case 0x51:
return "ADDIN"
case 0x53:
return "FORMDROPDOWN"
case 0x54:
return "ADVANCE"
case 0x55:
return "DOCPROPERTY"
case 0x57:
return "CONTROL"
case 0x58:
return "HYPERLINK"
case 0x59:
return "AUTOTEXTLIST"
case 0x5A:
return "LISTNUM"
case 0x5B:
return "HTMLCONTROL"
case 0x5C:
return "BIDIOUTLINE"
case 0x5D:
return "ADDRESSBLOCK"
case 0x5E:
return "GREETINGLINE"
case 0x5F:
return "SHAPE"
default:
return "UNKNOWN"
}
}
*/

53
godo/ai/convert/doc/utf16.go

@ -1,53 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package doc
import (
"encoding/binary"
)
// utf16Buffer accumulates a UTF-16LE code-unit stream byte by byte.
// Bytes arrive in (low, high) pairs; a dangling low byte is staged in
// char[0] until its partner arrives (or until Chars flushes it).
type utf16Buffer struct {
	haveReadLowerByte bool
	char              [2]byte
	data              []uint16
}

// Write feeds every byte of p into the buffer; it never fails.
func (buf *utf16Buffer) Write(p []byte) (n int, err error) {
	for _, b := range p {
		buf.WriteByte(b) //nolint:errcheck // WriteByte never fails
	}
	return len(p), nil
}

// WriteByte adds one byte: a low byte is staged, and the following
// high byte completes the pair and appends the decoded code unit.
func (buf *utf16Buffer) WriteByte(b byte) error {
	if !buf.haveReadLowerByte {
		buf.char[0] = b
	} else {
		buf.char[1] = b
		buf.data = append(buf.data, binary.LittleEndian.Uint16(buf.char[:]))
	}
	buf.haveReadLowerByte = !buf.haveReadLowerByte
	return nil
}

// Chars returns the accumulated UTF-16 code units; a pending odd byte
// is appended as a unit of its own (zero high byte).
func (buf *utf16Buffer) Chars() []uint16 {
	if !buf.haveReadLowerByte {
		return buf.data
	}
	return append(buf.data, uint16(buf.char[0]))
}

178
godo/ai/convert/docx.go

@ -1,178 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
"archive/zip"
"bytes"
"encoding/xml"
"fmt"
"io"
"os"
"time"
)
// typeOverride is one <Override> entry from [Content_Types].xml,
// mapping a package part (PartName) to its content type.
type typeOverride struct {
	XMLName     xml.Name `xml:"Override"`
	ContentType string   `xml:"ContentType,attr"`
	PartName    string   `xml:"PartName,attr"`
}

// contentTypeDefinition is the parsed <Types> root of
// [Content_Types].xml.
type contentTypeDefinition struct {
	XMLName   xml.Name       `xml:"Types"`
	Overrides []typeOverride `xml:"Override"`
}
// ConvertDocx converts an MS Word docx file to text.
//
// It walks [Content_Types].xml to find the document body, headers and
// footers, extracts their plain text, parses core metadata, and — as a
// side effect — extracts embedded images to the cache directory.
// Fixes over the previous version: a part that is declared in
// [Content_Types].xml but missing from the archive no longer causes a
// nil *zip.File dereference, the core-properties reader is closed
// promptly instead of via defer inside the loop, and the stray debug
// print of the image list is removed.
func ConvertDocx(r io.Reader) (string, error) {
	var size int64
	// Common case: if the reader is a file (or trivial wrapper), avoid
	// loading it all into memory.
	var ra io.ReaderAt
	if f, ok := r.(interface {
		io.ReaderAt
		Stat() (os.FileInfo, error)
	}); ok {
		si, err := f.Stat()
		if err != nil {
			return "", err
		}
		size = si.Size()
		ra = f
	} else {
		b, err := io.ReadAll(io.LimitReader(r, maxBytes))
		if err != nil {
			return "", fmt.Errorf("error read data: %v", err)
		}
		size = int64(len(b))
		ra = bytes.NewReader(b)
	}
	zr, err := zip.NewReader(ra, size)
	if err != nil {
		return "", fmt.Errorf("error unzipping data: %v", err)
	}
	zipFiles := mapZipFiles(zr.File)
	contentTypeDefinition, err := getContentTypeDefinition(zipFiles["[Content_Types].xml"])
	if err != nil {
		return "", err
	}
	var textHeader, textBody, textFooter string
	for _, override := range contentTypeDefinition.Overrides {
		f := zipFiles[override.PartName]
		if f == nil {
			// Declared but absent from the archive; skip rather than
			// dereference a nil *zip.File below.
			continue
		}
		switch override.ContentType {
		case "application/vnd.openxmlformats-package.core-properties+xml":
			// Metadata is parsed for its side-effect validation only;
			// it is not currently part of the returned text.
			if _, err := parseDocxMeta(f); err != nil {
				return "", err
			}
		case "application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml":
			body, err := parseDocxText(f)
			if err != nil {
				return "", err
			}
			textBody += body + "\n"
		case "application/vnd.openxmlformats-officedocument.wordprocessingml.footer+xml":
			footer, err := parseDocxText(f)
			if err != nil {
				return "", err
			}
			textFooter += footer + "\n"
		case "application/vnd.openxmlformats-officedocument.wordprocessingml.header+xml":
			header, err := parseDocxText(f)
			if err != nil {
				return "", err
			}
			textHeader += header + "\n"
		}
	}
	// After the parts are parsed, extract embedded images to the cache.
	if _, err := findImagesInZip(zr); err != nil {
		fmt.Printf("Error extracting images: %v", err)
	}
	return textHeader + "\n" + textBody + "\n" + textFooter, nil
}

// parseDocxMeta opens the core-properties part, decodes it into a
// string map and normalizes the created/modified timestamps into Unix
// seconds under CreatedDate/ModifiedDate. The reader is closed before
// returning.
func parseDocxMeta(f *zip.File) (map[string]string, error) {
	rc, err := f.Open()
	if err != nil {
		return nil, fmt.Errorf("error opening '%v' from archive: %v", f.Name, err)
	}
	defer rc.Close()
	meta, err := XMLToMap(rc)
	if err != nil {
		return nil, fmt.Errorf("error parsing '%v': %v", f.Name, err)
	}
	if tmp, ok := meta["modified"]; ok {
		if t, err := time.Parse(time.RFC3339, tmp); err == nil {
			meta["ModifiedDate"] = fmt.Sprintf("%d", t.Unix())
		}
	}
	if tmp, ok := meta["created"]; ok {
		if t, err := time.Parse(time.RFC3339, tmp); err == nil {
			meta["CreatedDate"] = fmt.Sprintf("%d", t.Unix())
		}
	}
	return meta, nil
}
// getContentTypeDefinition decodes [Content_Types].xml into its
// override list. The decode is size-capped by maxBytes.
//
// A nil zf (the part was not present in the archive) is now reported
// as an error instead of panicking on zf.Open.
func getContentTypeDefinition(zf *zip.File) (*contentTypeDefinition, error) {
	if zf == nil {
		return nil, fmt.Errorf("[Content_Types].xml missing from archive")
	}
	f, err := zf.Open()
	if err != nil {
		return nil, err
	}
	defer f.Close()
	x := &contentTypeDefinition{}
	if err := xml.NewDecoder(io.LimitReader(f, maxBytes)).Decode(x); err != nil {
		return nil, err
	}
	return x, nil
}
// mapZipFiles indexes archive entries by name, registering each entry
// under both "name" and "/name" so that PartName lookups (which carry
// a leading slash) and plain lookups both succeed.
func mapZipFiles(files []*zip.File) map[string]*zip.File {
	index := make(map[string]*zip.File, 2*len(files))
	for _, file := range files {
		index[file.Name] = file
		index["/"+file.Name] = file
	}
	return index
}
// parseDocxText opens one XML part of the archive and extracts its
// plain text via DocxXMLToText; errors are wrapped with the part name.
func parseDocxText(f *zip.File) (string, error) {
	r, err := f.Open()
	if err != nil {
		return "", fmt.Errorf("error opening '%v' from archive: %v", f.Name, err)
	}
	defer r.Close()
	text, err := DocxXMLToText(r)
	if err != nil {
		return "", fmt.Errorf("error parsing '%v': %v", f.Name, err)
	}
	return text, nil
}
// DocxXMLToText converts Docx XML into plain text.
// "br", "p" and "tab" elements mark breaks, while the contents of
// "instrText" (field codes) and "script" are excluded — the exact
// rendering is delegated to XMLToText; see its definition for details.
func DocxXMLToText(r io.Reader) (string, error) {
	return XMLToText(r, []string{"br", "p", "tab"}, []string{"instrText", "script"}, true)
}

336
godo/ai/convert/find.go

@ -1,336 +0,0 @@
package convert
import (
"archive/zip"
"encoding/xml"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"godo/libs"
)
// imageExtensions lists the lowercase file extensions treated as images.
var imageExtensions = []string{".jpg", ".jpeg", ".png", ".gif", ".bmp", ".webp", ".tif", ".tiff"}
// findImagesInZip walks the archive, returns the names of entries that
// look like images, and extracts each one to the cache directory.
// Extraction failures are only logged and do not abort the walk; the
// error return is currently always nil.
func findImagesInZip(zr *zip.Reader) ([]string, error) {
	var images []string
	cacheDir := libs.GetCacheDir()
	for _, f := range zr.File {
		if isImageFile(f.Name) {
			images = append(images, f.Name)
			if err := extractImageToCache(zr, f.Name, cacheDir); err != nil {
				log.Printf("Error extracting image %s to cache: %v", f.Name, err)
			}
		}
	}
	return images, nil
}
// isImageFile reports whether fileName carries one of the known image
// extensions (compared case-insensitively).
func isImageFile(fileName string) bool {
	ext := strings.ToLower(filepath.Ext(fileName))
	for i := range imageExtensions {
		if imageExtensions[i] == ext {
			return true
		}
	}
	return false
}
// extractImageToCache copies the named image entry out of the archive
// into cacheDir (flattening its path to the base name). It then tries
// to find text near the image in the document XML and saves it next to
// the image as "<image>.txt"; failures in that second step are only
// logged — the image copy alone determines the return value.
func extractImageToCache(zr *zip.Reader, imageName, cacheDir string) error {
	fileInZip, err := getFileByName(zr.File, imageName)
	if err != nil {
		return err
	}
	rc, err := fileInZip.Open()
	if err != nil {
		return fmt.Errorf("failed to open file %s in zip: %w", imageName, err)
	}
	defer rc.Close()
	justFileName := filepath.Base(imageName)
	outFilePath := filepath.Join(cacheDir, justFileName)
	outFile, err := os.Create(outFilePath)
	if err != nil {
		return fmt.Errorf("failed to create file %s: %w", outFilePath, err)
	}
	defer outFile.Close()
	_, err = io.Copy(outFile, rc)
	if err != nil {
		return fmt.Errorf("failed to copy image content to %s: %w", outFilePath, err)
	}
	// Extract the text surrounding the image and store it beside it.
	textContent, err := getSurroundingTextForOffice(zr, imageName)
	if err != nil {
		log.Printf("Error getting surrounding text for image %s: %v", imageName, err)
	} else {
		textFilePath := filepath.Join(cacheDir, strings.TrimSuffix(justFileName, filepath.Ext(justFileName))+".txt")
		if err := saveTextToFile(textContent, textFilePath); err != nil {
			log.Printf("Error saving text to file %s: %v", textFilePath, err)
		}
	}
	return nil
}
// getFileByName linearly searches the archive's file list for an entry whose
// full name equals name and returns it, or an error when no entry matches.
func getFileByName(files []*zip.File, name string) (*zip.File, error) {
	for _, candidate := range files {
		if candidate.Name != name {
			continue
		}
		return candidate, nil
	}
	return nil, fmt.Errorf("file %s not found in zip archive", name)
}
// getSurroundingTextForOffice returns the text surrounding the given image in
// a .pptx, .xlsx or .docx archive, truncated via truncateText. It tries each
// relevant XML part in turn and returns the first non-empty match, or an
// error when no part yields any text.
func getSurroundingTextForOffice(zr *zip.Reader, imageName string) (string, error) {
	imageDir := filepath.Dir(imageName)
	xmlFiles, err := findRelevantXMLFiles(zr, imageDir)
	if err != nil {
		return "", err
	}
	for _, xmlFile := range xmlFiles {
		if text, ok := surroundingTextFromXML(zr, xmlFile, imageDir, imageName); ok {
			return text, nil
		}
	}
	return "", fmt.Errorf("no surrounding text found for image %s", imageName)
}

// surroundingTextFromXML parses one XML part and reports the text surrounding
// the image, if any. Extracted into its own function so the opened reader is
// closed when each part is done — the original deferred rc.Close() inside the
// loop, keeping every reader open until the whole search finished.
func surroundingTextFromXML(zr *zip.Reader, xmlFile, imageDir, imageName string) (string, bool) {
	fileInZip, err := getFileByName(zr.File, xmlFile)
	if err != nil {
		return "", false
	}
	rc, err := fileInZip.Open()
	if err != nil {
		return "", false
	}
	defer rc.Close()
	doc, err := parseXMLDocument(rc, imageDir)
	if err != nil {
		return "", false
	}
	if text := getSurroundingText(doc, filepath.Base(imageName)); text != "" {
		return truncateText(text), true
	}
	return "", false
}
// findRelevantXMLFiles maps an image's directory inside the archive to the
// XML parts that may reference it: slide XML for .pptx, sheet XML for .xlsx,
// and the single document.xml for .docx. Unknown layouts yield an error.
func findRelevantXMLFiles(zr *zip.Reader, imageDir string) ([]string, error) {
	if strings.Contains(imageDir, "ppt/media") {
		return findFilesByPattern(zr, "ppt/slides/slide*.xml"), nil
	}
	if strings.Contains(imageDir, "xl/media") {
		return findFilesByPattern(zr, "xl/worksheets/sheet*.xml"), nil
	}
	if strings.Contains(imageDir, "word/media") {
		return []string{"word/document.xml"}, nil
	}
	return nil, fmt.Errorf("unknown image directory %s", imageDir)
}
// parseXMLDocument decodes the XML stream into the document model matching
// the image's location (pptx, xlsx, or docx) and returns the typed pointer.
func parseXMLDocument(rc io.ReadCloser, imageDir string) (interface{}, error) {
	var doc interface{}
	switch {
	case strings.Contains(imageDir, "ppt/media"):
		doc = new(PPTXDocument)
	case strings.Contains(imageDir, "xl/media"):
		doc = new(XLSXDocument)
	case strings.Contains(imageDir, "word/media"):
		doc = new(DOCXDocument)
	default:
		return nil, fmt.Errorf("unknown image directory %s", imageDir)
	}
	decoder := xml.NewDecoder(rc)
	if err := decoder.Decode(doc); err != nil {
		return nil, err
	}
	return doc, nil
}
// getSurroundingText dispatches on the parsed document type and returns the
// text of the slide/sheet/paragraph that references the given image base
// name, or "" when the image is not found in the document structure.
// NOTE(review): the match relies on Type/ImagePath fields populated from the
// namespace-prefixed xml struct tags declared below, which Go's encoding/xml
// may never fill as written — verify against real documents.
func getSurroundingText(doc interface{}, imagePath string) string {
	switch d := doc.(type) {
	case *PPTXDocument:
		for _, slide := range d.Slides {
			for _, shape := range slide.Shapes {
				if shape.Type == "pic" && shape.ImagePath == imagePath {
					return getTextFromSlide(slide)
				}
			}
		}
	case *XLSXDocument:
		for _, sheet := range d.Sheets {
			for _, drawing := range sheet.Drawings {
				for _, image := range drawing.Images {
					if image.ImagePath == imagePath {
						return getTextFromSheet(sheet)
					}
				}
			}
		}
	case *DOCXDocument:
		for _, paragraph := range d.Body.Paragraphs {
			for _, run := range paragraph.Runs {
				for _, pic := range run.Pictures {
					if pic.ImagePath == imagePath {
						return getTextFromParagraph(paragraph)
					}
				}
			}
		}
	}
	return ""
}
// findFilesByPattern returns the names of all archive entries whose full path
// matches the given filepath.Match pattern. Pattern errors are treated as
// non-matches, exactly like a failed match.
func findFilesByPattern(zr *zip.Reader, pattern string) []string {
	var matches []string
	for _, entry := range zr.File {
		ok, _ := filepath.Match(pattern, entry.Name)
		if ok {
			matches = append(matches, entry.Name)
		}
	}
	return matches
}
// saveTextToFile writes text to filePath with mode 0644, creating or
// truncating the file.
func saveTextToFile(text, filePath string) error {
	data := []byte(text)
	return os.WriteFile(filePath, data, 0644)
}
// truncateText limits text to at most 80 characters (runes).
// The original sliced at 80 *bytes*, which could cut a multi-byte UTF-8
// character (e.g. Chinese text, the expected input here) in half and produce
// an invalid string; counting runes keeps the cut on a character boundary.
func truncateText(text string) string {
	runes := []rune(text)
	if len(runes) > 80 {
		return string(runes[:80])
	}
	return text
}
// PPTXDocument models the slide XML of a .pptx part.
// NOTE(review): Go's encoding/xml matches elements by namespace URL + local
// name, not by prefix, and has no "elem/@attr" attribute-path syntax — tags
// like "p:cSld>p:spTree>p:sp" and "a:blip/@r:embed" below likely never match
// and leave these fields zero-valued. Confirm against a sample file before
// relying on them.
type PPTXDocument struct {
	Slides []Slide `xml:"p:sld"`
}

// Slide holds the shapes of one slide.
type Slide struct {
	Shapes []Shape `xml:"p:cSld>p:spTree>p:sp"`
}

// Shape is a shape on a slide; Type is compared against "pic" by callers.
type Shape struct {
	Type      string    `xml:"p:pic"`
	ImagePath string    `xml:"p:pic>p:blipFill>a:blip/@r:embed"`
	Elements  []Element `xml:"p:txBody>a:p>a:r"`
}

// Element is one text run inside a shape; Value carries the character data.
type Element struct {
	Type  string `xml:"a:t"`
	Value string `xml:",chardata"`
}

// XLSXDocument models worksheet XML of an .xlsx part.
type XLSXDocument struct {
	Sheets []Sheet `xml:"worksheet"`
}

// Sheet holds the rows and drawing references of one worksheet.
type Sheet struct {
	Rows     []Row     `xml:"sheetData>row"`
	Drawings []Drawing `xml:"drawing"`
}

// Row is one worksheet row.
type Row struct {
	Cells []Cell `xml:"c"`
}

// Cell carries a single cell value ("v" element).
type Cell struct {
	Value string `xml:"v"`
}

// Drawing groups images anchored on a sheet.
type Drawing struct {
	Images []Image `xml:"xdr:pic"`
}

// Image references an embedded picture (see NOTE above about the tag syntax).
type Image struct {
	ImagePath string `xml:"xdr:pic>xdr:blipFill>a:blip/@r:embed"`
}

// DOCXDocument models word/document.xml of a .docx part.
type DOCXDocument struct {
	Body struct {
		Paragraphs []Paragraph `xml:"w:p"`
	} `xml:"w:body"`
}

// Paragraph is one w:p element.
type Paragraph struct {
	Runs []Run `xml:"w:r"`
}

// Run mixes text and inline drawings within a paragraph.
type Run struct {
	Pictures []Picture `xml:"w:drawing"`
	Text     []Text    `xml:"w:t"`
}

// Text carries the character data of one w:t element.
type Text struct {
	Value string `xml:",chardata"`
}

// Picture references an inline drawing by its docPr name attribute
// (see NOTE above about the "@name" path syntax).
type Picture struct {
	ImagePath string `xml:"wp:docPr/@name"`
}
// getTextFromSlide concatenates the text of every non-picture shape on the
// slide. Uses strings.Builder instead of the original "+=" loop, which
// reallocates the string on every iteration.
func getTextFromSlide(slide Slide) string {
	var b strings.Builder
	for _, shape := range slide.Shapes {
		if shape.Type != "pic" {
			b.WriteString(getTextFromShape(shape))
		}
	}
	return b.String()
}
// getTextFromShape concatenates the character data of every text run in the
// shape. Uses strings.Builder instead of the original quadratic "+=" loop.
func getTextFromShape(shape Shape) string {
	var b strings.Builder
	for _, element := range shape.Elements {
		b.WriteString(element.Value)
	}
	return b.String()
}
// getTextFromSheet concatenates every cell value of the sheet, row by row.
// Uses strings.Builder instead of the original quadratic "+=" loop.
func getTextFromSheet(sheet Sheet) string {
	var b strings.Builder
	for _, row := range sheet.Rows {
		for _, cell := range row.Cells {
			b.WriteString(cell.Value)
		}
	}
	return b.String()
}
// getTextFromParagraph concatenates the text of every run in the paragraph.
// Uses strings.Builder instead of the original quadratic "+=" loop.
func getTextFromParagraph(paragraph Paragraph) string {
	var b strings.Builder
	for _, run := range paragraph.Runs {
		for _, t := range run.Text {
			b.WriteString(t.Value)
		}
	}
	return b.String()
}

381
godo/ai/convert/http.go

@ -1,381 +0,0 @@
// /*
// - GodoOS - A lightweight cloud desktop
// - Copyright (C) 2024 https://godoos.com
// *
// - This program is free software: you can redistribute it and/or modify
// - it under the terms of the GNU Lesser General Public License as published by
// - the Free Software Foundation, either version 2.1 of the License, or
// - (at your option) any later version.
// *
// - This program is distributed in the hope that it will be useful,
// - but WITHOUT ANY WARRANTY; without even the implied warranty of
// - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// - GNU Lesser General Public License for more details.
// *
// - You should have received a copy of the GNU Lesser General Public License
// - along with this program. If not, see <http://www.gnu.org/licenses/>.
// */
package convert
// import (
// "bytes"
// "encoding/json"
// "fmt"
// "godo/libs"
// "io"
// "log"
// "mime"
// "net/http"
// "os"
// "path/filepath"
// "strconv"
// "strings"
// "time"
// )
// // UploadInfo 用于表示上传文件的信息
// type UploadInfo struct {
// Name string `json:"name"`
// SavePath string `json:"save_path"`
// Content string `json:"content"`
// CreatedAt time.Time `json:"created_at"`
// }
// // SaveContentToFile 保存内容到文件并返回UploadInfo结构体
// func SaveContentToFile(content, fileName string) (UploadInfo, error) {
// uploadBaseDir, err := libs.GetUploadDir()
// if err != nil {
// return UploadInfo{}, err
// }
// // 去除文件名中的空格
// fileNameWithoutSpaces := strings.ReplaceAll(fileName, " ", "_")
// fileNameWithoutSpaces = strings.ReplaceAll(fileNameWithoutSpaces, "/", "")
// fileNameWithoutSpaces = strings.ReplaceAll(fileNameWithoutSpaces, `\`, "")
// // 提取文件名和扩展名
// // 查找最后一个点的位置
// lastDotIndex := strings.LastIndexByte(fileNameWithoutSpaces, '.')
// // 如果找到点,则提取扩展名,否则视为没有扩展名
// ext := ""
// if lastDotIndex != -1 {
// ext = fileNameWithoutSpaces[lastDotIndex:]
// fileNameWithoutSpaces = fileNameWithoutSpaces[:lastDotIndex]
// } else {
// ext = ""
// }
// randFileName := fmt.Sprintf("%s_%s%s", fileNameWithoutSpaces, strconv.FormatInt(time.Now().UnixNano(), 10), ext)
// savePath := filepath.Join(uploadBaseDir, time.Now().Format("2006-01-02"), randFileName)
// if err := os.MkdirAll(filepath.Dir(savePath), 0755); err != nil {
// return UploadInfo{}, err
// }
// if err := os.WriteFile(savePath, []byte(content), 0644); err != nil {
// return UploadInfo{}, err
// }
// return UploadInfo{
// Name: fileNameWithoutSpaces,
// SavePath: savePath,
// //Content: content,
// CreatedAt: time.Now(),
// }, nil
// }
// // MultiUploadHandler 处理多文件上传请求
// func MultiUploadHandler(w http.ResponseWriter, r *http.Request) {
// if err := r.ParseMultipartForm(10000 << 20); err != nil {
// libs.Error(w, "Failed to parse multipart form")
// return
// }
// files := r.MultipartForm.File["files"]
// if len(files) == 0 {
// libs.Error(w, "No file parts in the request")
// return
// }
// fileInfoList := make([]UploadInfo, 0, len(files))
// for _, fileHeader := range files {
// file, err := fileHeader.Open()
// if err != nil {
// libs.Error(w, "Failed to open uploaded file")
// continue
// }
// defer file.Close()
// content, err := io.ReadAll(file)
// if err != nil {
// libs.Error(w, "Failed to read uploaded file")
// continue
// }
// //log.Printf(string(content))
// // 保存上传的文件内容
// info, err := SaveContentToFile(string(content), fileHeader.Filename)
// if err != nil {
// libs.Error(w, "Failed to save uploaded file")
// continue
// }
// log.Println(info.SavePath)
// // 对上传的文件进行转换处理
// convertData := Convert(info.SavePath) // Assuming convert.Convert expects a file path
// log.Printf("convertData: %v", convertData)
// if convertData.Data == "" {
// continue
// }
// images := []ImagesInfo{}
// resInfo := ResContentInfo{
// Content: convertData.Data,
// Images: images,
// }
// // 将转换后的数据写入文件
// savePath := info.SavePath + "_result.json"
// // if err := WriteConvertedDataToFile(convertData.Data, savePath); err != nil {
// // serv.Err("Failed to write converted data to file", w)
// // continue
// // }
// // 使用 json.MarshalIndent 直接获取内容的字节切片
// contents, err := json.MarshalIndent(resInfo, "", " ")
// if err != nil {
// libs.Error(w, "failed to marshal reqBodies to JSON:"+savePath)
// continue
// }
// // 将字节切片直接写入文件
// if err := os.WriteFile(savePath, contents, 0644); err != nil {
// libs.Error(w, "failed to write to file:"+savePath)
// continue
// }
// //info.SavePath = savePath
// fileInfoList = append(fileInfoList, info)
// }
// libs.Success(w, fileInfoList, "success")
// }
// // WriteConvertedDataToFile 将转换后的数据写入文件
// func WriteConvertedDataToFile(data, filePath string) error {
// file, err := os.Create(filePath)
// if err != nil {
// return err
// }
// defer file.Close()
// _, err = file.WriteString(data)
// if err != nil {
// return err
// }
// fmt.Printf("Successfully wrote %d bytes to file %s.\n", len(data), filePath)
// return nil
// }
// // jsonParamHandler 处理JSON参数请求
// func JsonParamHandler(w http.ResponseWriter, r *http.Request) {
// type RequestBody struct {
// Path string `json:"path"`
// }
// var requestBody RequestBody
// if err := json.NewDecoder(r.Body).Decode(&requestBody); err != nil {
// libs.Error(w, "Invalid request body")
// return
// }
// path := requestBody.Path
// fmt.Printf("Parameter 'path' from JSON is: %s\n", path)
// if path != "" {
// resp := Convert(path)
// w.Header().Set("Content-Type", "application/json")
// if err := json.NewEncoder(w).Encode(resp); err != nil {
// libs.Error(w, "Error encoding JSON")
// return
// }
// return
// }
// }
// // HandleURLPost 接收一个POST请求,其中包含一个URL参数,然后处理该URL指向的内容并保存
// func HandleURLPost(w http.ResponseWriter, r *http.Request) {
// var requestBody struct {
// URL string `json:"url"`
// }
// decoder := json.NewDecoder(r.Body)
// if err := decoder.Decode(&requestBody); err != nil {
// libs.Error(w, "Invalid request body")
// return
// }
// resp, err := http.Get(requestBody.URL)
// if err != nil {
// libs.Error(w, "Invalid request url:"+requestBody.URL)
// return
// }
// defer resp.Body.Close()
// body, errRead := io.ReadAll(resp.Body)
// if errRead != nil {
// libs.Error(w, "Invalid request body")
// return
// }
// reader := bytes.NewReader(body)
// res, err := ConvertHTML(reader)
// if err != nil {
// libs.Error(w, "Failed to convert content")
// return
// }
// log.Printf("Converted content: %s", res)
// // 使用通用的SaveContentToFile函数保存内容到文件
// //fileName := "converted_from_url"
// // 获取内容的第一行作为标题
// fileName := strings.SplitN(res, "\n", 2)[0]
// if fileName == "" {
// fileName = "未命名网页"
// }
// fileName = fileName + ".html"
// info, err := SaveContentToFile(res, fileName)
// if err != nil {
// libs.Error(w, "Failed to save converted content to file")
// return
// }
// // 将转换后的数据写入文件
// savePath := info.SavePath + "_result.json"
// // if err := WriteConvertedDataToFile(info.Content, savePath); err != nil {
// // serv.Err("Failed to write converted data to file", w)
// // return
// // }
// // 使用 json.MarshalIndent 直接获取内容的字节切片
// resInfo := ResContentInfo{
// Content: info.Content,
// }
// contents, err := json.MarshalIndent(resInfo, "", " ")
// if err != nil {
// libs.Error(w, "failed to marshal reqBodies to JSON:"+savePath)
// return
// }
// // 将字节切片直接写入文件
// if err := os.WriteFile(savePath, contents, 0644); err != nil {
// libs.Error(w, "failed to write to file:"+savePath)
// return
// }
// w.Header().Set("Content-Type", "application/json")
// if err := json.NewEncoder(w).Encode(info); err != nil {
// libs.Error(w, "Error encoding JSON")
// return
// }
// }
// func ShowDetailHandler(w http.ResponseWriter, r *http.Request) {
// // 从 URL 查询参数中获取图片路径
// filePath := r.URL.Query().Get("path")
// //log.Printf("imagePath: %s", imagePath)
// // 检查图片路径是否为空或无效
// if filePath == "" {
// libs.Error(w, "Invalid file path")
// return
// }
// var reqBodies ResContentInfo
// if libs.PathExists(filePath + "_result.json") {
// //log.Printf("ShowDetailHandler: %s", filePath)
// filePath = filePath + "_result.json"
// content, err := os.ReadFile(filePath)
// if err != nil {
// libs.Error(w, "Failed to open file")
// return
// }
// err = json.Unmarshal(content, &reqBodies)
// if err != nil {
// libs.Error(w, "Failed to read file")
// return
// }
// // 设置响应头
// w.Header().Set("Content-Type", "text/plain; charset=utf-8")
// resContent := reqBodies.Content + "/n"
// for _, image := range reqBodies.Images {
// resContent += image.Content + "/n"
// }
// // 写入响应体
// _, err = w.Write([]byte(resContent))
// if err != nil {
// libs.Error(w, "Failed to write response")
// return
// }
// } else {
// // 确保图片路径是绝对路径
// absImagePath, err := filepath.Abs(filePath)
// //log.Printf("absImagePath: %s", absImagePath)
// if err != nil {
// libs.Error(w, err.Error())
// return
// }
// // 获取文件的 MIME 类型
// mimeType := mime.TypeByExtension(filepath.Ext(absImagePath))
// if mimeType == "" {
// mimeType = "application/octet-stream" // 如果无法识别,就用默认的二进制流类型
// }
// // 设置响应头的 MIME 类型
// w.Header().Set("Content-Type", mimeType)
// // 打开文件并读取内容
// file, err := os.Open(absImagePath)
// if err != nil {
// libs.Error(w, err.Error())
// return
// }
// defer file.Close()
// // 将文件内容写入响应体
// _, err = io.Copy(w, file)
// if err != nil {
// libs.Error(w, err.Error())
// }
// }
// }
// func ServeImage(w http.ResponseWriter, r *http.Request) {
// // 从 URL 查询参数中获取图片路径
// imagePath := r.URL.Query().Get("path")
// //log.Printf("imagePath: %s", imagePath)
// // 检查图片路径是否为空或无效
// if imagePath == "" {
// libs.Error(w, "Invalid image path")
// return
// }
// // 确保图片路径是绝对路径
// absImagePath, err := filepath.Abs(imagePath)
// //log.Printf("absImagePath: %s", absImagePath)
// if err != nil {
// libs.Error(w, err.Error())
// return
// }
// // 获取文件的 MIME 类型
// mimeType := mime.TypeByExtension(filepath.Ext(absImagePath))
// if mimeType == "" {
// mimeType = "application/octet-stream" // 如果无法识别,就用默认的二进制流类型
// }
// // 设置响应头的 MIME 类型
// w.Header().Set("Content-Type", mimeType)
// // 打开文件并读取内容
// file, err := os.Open(absImagePath)
// if err != nil {
// libs.Error(w, err.Error())
// return
// }
// defer file.Close()
// // 将文件内容写入响应体
// _, err = io.Copy(w, file)
// if err != nil {
// libs.Error(w, err.Error())
// }
// }

45
godo/ai/convert/image.go

@ -1,45 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
"io"
"godo/ai/convert/libs"
)
// ConvertImage spills the image read from r to a temporary file, OCRs it with
// the RapidOCR binary, and returns the recognized text.
func ConvertImage(r io.Reader) (string, error) {
	// The OCR binary needs a real file path, so buffer the reader to disk.
	absFilePath, tmpfile, err := libs.GetTempFile(r, "prefix-image")
	if err != nil {
		return "", err
	}
	// Always release the temp file — the original only called CloseTempFile
	// on the success path, leaking the file whenever RunRapid failed.
	defer libs.CloseTempFile(tmpfile)

	paths := []string{absFilePath}
	output, err := libs.RunRapid(paths)
	if err != nil {
		return "", err
	}
	return output, nil
}

153
godo/ai/convert/img.go

@ -1,153 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
	"crypto/md5"
	"fmt"
	lib "godo/ai/convert/libs"
	"godo/libs"
	"io"
	"log"
	"os"
	"path/filepath"
	"strings"
)
// ResContentInfo is the JSON document written alongside a converted file:
// the extracted text plus any images copied out of it.
type ResContentInfo struct {
	Content string `json:"content"`
	// NOTE(review): the JSON key is singular "image" although the field holds
	// a slice — persisted result files already use this key, so keep it.
	Images []ImagesInfo `json:"image"`
}

// ImagesInfo pairs an extracted image's on-disk path with the text OCRed
// from it (empty when OCR failed).
type ImagesInfo struct {
	Path    string `json:"path"`
	Content string `json:"content"`
}
// calculateFileHash returns the hex-encoded MD5 digest of the file at
// filePath, streaming the contents rather than loading them into memory.
func calculateFileHash(filePath string) (string, error) {
	f, err := os.Open(filePath)
	if err != nil {
		return "", err
	}
	defer f.Close()

	digest := md5.New()
	if _, err = io.Copy(digest, f); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", digest.Sum(nil)), nil
}
// CopyImages moves OCR-able images out of the temporary cache directory into
// destDir, OCRs each one, and returns the destination path plus recognized
// text for every file actually copied.
//
// Behavior as written:
//   - files already in destDir with identical size AND MD5 are skipped;
//   - OCR failures are swallowed and recorded as empty Content;
//   - the source cache dir is always removed before returning, and destDir
//     is removed again when nothing was copied.
func CopyImages(destDir string) ([]ImagesInfo, error) {
	copiedFiles := []ImagesInfo{}
	srcDir, err := libs.GetTrueCacheDir()
	// NOTE(review): PathExists is checked before err — if GetTrueCacheDir can
	// fail while returning an empty path, this ordering masks the real error.
	if !libs.PathExists(srcDir) {
		return copiedFiles, fmt.Errorf("source directory does not exist: %s", srcDir)
	}
	if err != nil {
		return copiedFiles, fmt.Errorf("failed to create temporary cache directory: %w", err)
	}
	if !libs.PathExists(destDir) {
		if err := os.MkdirAll(destDir, 0755); err != nil {
			return copiedFiles, fmt.Errorf("failed to create destination directory: %w", err)
		}
	}
	err = filepath.Walk(srcDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			ext := filepath.Ext(path)
			if isImageExtension(ext) {
				destPath := filepath.Join(destDir, info.Name())
				// If the destination already exists with the same size,
				// compare MD5 hashes before deciding to copy again.
				if fileInfo, err := os.Stat(destPath); err == nil {
					if fileInfo.Size() == info.Size() {
						// Same size: confirm identical content via MD5.
						srcHash, err := calculateFileHash(path)
						if err != nil {
							log.Printf("Error calculating source hash for %s: %v", path, err)
							return err
						}
						destHash, err := calculateFileHash(destPath)
						if err != nil {
							log.Printf("Error calculating destination hash for %s: %v", destPath, err)
							return err
						}
						if srcHash == destHash {
							fmt.Printf("Skipping %s because a file with the same size and content already exists.\n", path)
							return nil
						}
					}
				}
				paths := []string{path}
				// Best-effort OCR: a failed recognition yields empty content.
				content, err := lib.RunRapid(paths)
				if err != nil {
					content = ""
				}
				// Copy the image itself.
				if err := copyImagesFile(path, destPath); err != nil {
					return err
				}
				copiedFiles = append(copiedFiles, ImagesInfo{Path: destPath, Content: content}) // record the successfully copied file
				fmt.Printf("Copied %s to %s\n", path, destPath)
			}
		}
		return nil
	})
	// NOTE(review): this defer is registered after the walk completes, so it
	// only guards the returns below — it never fires for the early returns
	// above. Effectively srcDir is removed on every post-walk exit path.
	defer func() {
		os.RemoveAll(srcDir)
	}()
	if len(copiedFiles) < 1 {
		os.RemoveAll(destDir)
	}
	if err != nil {
		return copiedFiles, err
	}
	return copiedFiles, nil
}
// isImageExtension reports whether ext (including the leading dot, e.g.
// ".jpg") is a recognized image extension. Matching is case-insensitive so
// files named like "PHOTO.JPG" are no longer silently skipped by callers
// that pass filepath.Ext output verbatim.
func isImageExtension(ext string) bool {
	switch strings.ToLower(ext) {
	case ".jpg", ".jpeg", ".jpe", ".jfif", ".jfif-tbnl", ".png", ".gif", ".bmp", ".webp", ".tif", ".tiff":
		return true
	default:
		return false
	}
}
// isConvertImageFile reports whether ext belongs to a document format whose
// embedded images this package extracts during conversion.
func isConvertImageFile(ext string) bool {
	for _, candidate := range []string{".docx", ".pdf", ".pptx", ".odt"} {
		if candidate == ext {
			return true
		}
	}
	return false
}
// copyImagesFile copies the file at src to dst (mode 0644 on creation),
// streaming the bytes with io.Copy instead of buffering the whole file in
// memory as the original os.ReadFile/os.WriteFile pair did — images can be
// large. The destination is truncated if it already exists.
func copyImagesFile(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	if _, err = io.Copy(out, in); err != nil {
		out.Close()
		return err
	}
	// Surface write-back errors that only appear on close.
	return out.Close()
}

127
godo/ai/convert/libs/file.go

@ -1,127 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package libs
import (
"errors"
"fmt"
"godo/libs"
"io"
"log"
"os"
"path/filepath"
"runtime"
)
// getXpdfDir returns the path of the named Xpdf helper binary (for example
// "pdftopng") inside the conversion tool directory, appending ".exe" on
// Windows. It errors when the binary is not present.
func getXpdfDir(exename string) (string, error) {
	convertDir, err := getConvertDir()
	if err != nil {
		// Return the error instead of log.Fatal-ing: terminating the whole
		// process from a path-lookup helper is not a library's call to make.
		return "", err
	}
	name := exename
	if runtime.GOOS == "windows" {
		name += ".exe"
	}
	path := filepath.Join(convertDir, "pdf", name)
	if !libs.PathExists(path) {
		return "", errors.New("pdf convert exe not found")
	}
	return path, nil
}
// getRapidDir returns the path of the RapidOcrOnnx OCR executable inside the
// conversion tool directory ("rapid" subdir), with ".exe" on Windows.
// It errors when the binary is not present.
func getRapidDir() (string, error) {
	convertDir, err := getConvertDir()
	if err != nil {
		// Propagate the error instead of log.Fatal-ing so callers can decide
		// how to handle a missing tool directory.
		return "", err
	}
	binary := "RapidOcrOnnx"
	if runtime.GOOS == "windows" {
		binary += ".exe"
	}
	path := filepath.Join(convertDir, "rapid", binary)
	if !libs.PathExists(path) {
		return "", errors.New("RapidOcrOnnx not found")
	}
	return path, nil
}
// getRapidModelDir returns the directory holding the ONNX models used by
// RapidOcrOnnx, erroring when it does not exist.
func getRapidModelDir() (string, error) {
	convertDir, err := getConvertDir()
	if err != nil {
		// Propagate the error instead of log.Fatal-ing so callers can decide
		// how to handle a missing tool directory.
		return "", err
	}
	path := filepath.Join(convertDir, "rapid", "models")
	if !libs.PathExists(path) {
		return "", errors.New("RapidOcrOnnx model not found")
	}
	return path, nil
}
// getConvertDir resolves the base directory ("goconv") that holds the bundled
// conversion tools under the AI runtime directory.
func getConvertDir() (string, error) {
	runDir, err := libs.GetAiRunDir()
	if err != nil {
		return "", fmt.Errorf("failed to get user home directory: %w", err)
	}
	convDir := filepath.Join(runDir, "goconv")
	return convDir, nil
}
// GetTempDir creates a fresh temporary directory whose name begins with
// pathname and returns its path. On failure it logs the error and returns
// "./" together with the error.
func GetTempDir(pathname string) (string, error) {
	dir, err := os.MkdirTemp("", pathname)
	if err != nil {
		log.Println("Failed to create temporary directory:", err)
		return "./", err
	}
	log.Println("Temporary directory created:", dir)
	return dir, nil
}
// GetTempFile spills r into a new temporary file (name prefixed with prename)
// and returns the file's absolute path together with the open *os.File; the
// caller releases both via CloseTempFile. On any failure after creation the
// temp file is closed and removed so it does not leak on disk (the original
// returned early and left it behind).
func GetTempFile(r io.Reader, prename string) (string, *os.File, error) {
	tmpfile, err := os.CreateTemp("", prename)
	if err != nil {
		return "", tmpfile, err
	}
	cleanup := func() {
		tmpfile.Close()
		os.Remove(tmpfile.Name())
	}
	// Copy the reader's contents into the temp file.
	if _, err := io.Copy(tmpfile, r); err != nil {
		cleanup()
		return "", tmpfile, err
	}
	// Resolve the absolute path for callers that exec external tools on it.
	absFilePath, err := filepath.Abs(tmpfile.Name())
	if err != nil {
		cleanup()
		return "", tmpfile, err
	}
	return absFilePath, tmpfile, nil
}
// CloseTempFile closes a temp file produced by GetTempFile and deletes it
// from disk. Errors are deliberately ignored: cleanup is best-effort.
// The original wrapped the two calls in a defer that fired immediately at
// function return — a no-op indirection, removed here — and panicked on a
// nil argument, which is now tolerated.
func CloseTempFile(tmpfile *os.File) {
	if tmpfile == nil {
		return
	}
	_ = tmpfile.Close()
	_ = os.Remove(tmpfile.Name()) // remove the temp file as well
}

84
godo/ai/convert/libs/kind.go

@ -1,84 +0,0 @@
/*
Type definitions for markdown elements.
*/
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package libs
import "fmt"
//go:generate stringer -type=Kind
type Kind int

//go:generate stringer -type=ElementType
type ElementType int

// Markdown element kinds: block-level values first, then inline values.
const (
	Head Kind = iota
	Paragraph
	List
	QuoteBlock
	CodeBlock
	Rule
	Emphasis
	Strong
	Link
	Code
	Image
)

// ElementType distinguishes block elements from inline elements.
const (
	Block ElementType = iota
	Inline
)

// Stringer-style lookup tables: concatenated names plus cumulative end
// offsets into the name string.
const _Kind_name = "HeadParagraphListQuoteBlockCodeBlockRuleEmphasisStrongLinkCodeImage"

var _Kind_index = [...]uint8{4, 13, 17, 27, 36, 40, 48, 54, 58, 62, 67}

// String returns the name of k, or "Kind(n)" for out-of-range values.
func (k Kind) String() string {
	if k < 0 || int(k) >= len(_Kind_index) {
		return fmt.Sprintf("Kind(%d)", k)
	}
	start := uint8(0)
	if k > 0 {
		start = _Kind_index[k-1]
	}
	return _Kind_name[start:_Kind_index[k]]
}

const _ElementType_name = "BlockInline"

var _ElementType_index = [...]uint8{5, 11}

// String returns the name of e, or "ElementType(n)" for out-of-range values.
func (e ElementType) String() string {
	if e < 0 || int(e) >= len(_ElementType_index) {
		return fmt.Sprintf("ElementType(%d)", e)
	}
	start := uint8(0)
	if e > 0 {
		start = _ElementType_index[e-1]
	}
	return _ElementType_name[start:_ElementType_index[e]]
}

170
godo/ai/convert/libs/rapid.go

@ -1,170 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package libs
import (
"bytes"
_ "embed" // Needed for go:embed
"fmt"
"log"
"os/exec"
"regexp"
"runtime"
"strings"
)
/*
*
./RapidOcrOnnx --models models \
--det ch_PP-OCRv4_det_infer-v7.onnx \
--rec ch_PP-OCRv4_rec_infer-v7.onnx \
--cls ch_ppocr_mobile_v2.0_cls_infer.onnx \
--keys ppocr_keys_v1.txt \
--image $TARGET_IMG \
--numThread $NUM_THREADS \
--padding 50 \
--maxSideLen 1024 \
--boxScoreThresh 0.5 \
--boxThresh 0.3 \
--unClipRatio 1.6 \
--doAngle 1 \
--mostAngle 1 \
--GPU $GPU_INDEX
*/
// RunRapid OCRs each image path with the RapidOcrOnnx binary and joins the
// per-image results with newlines. Images that fail to convert are logged and
// skipped; the function itself only fails when the OCR binary or its model
// directory cannot be located.
func RunRapid(imagePaths []string) (string, error) {
	runFile, err := getRapidDir()
	if err != nil {
		return "", err
	}
	modelDir, err := getRapidModelDir()
	if err != nil {
		return "", err
	}
	results := make([]string, 0, len(imagePaths))
	for _, imagePath := range imagePaths {
		res, convErr := ConvertImage(runFile, modelDir, imagePath)
		if convErr != nil {
			log.Printf("- %v\n", convErr)
			continue
		}
		results = append(results, res)
	}
	// The original returned the outer `err` here, but the loop's `:=`
	// shadowed it, so that value was always nil — make the success return
	// explicit instead of relying on the shadowing accident.
	return strings.Join(results, "\n"), nil
}
// func GetImageContent(imagePath string) (string, error) {
// runFile, err := getRapidDir()
// if err != nil {
// return "", err
// }
// modelDir, err := getRapidModelDir()
// if err != nil {
// return "", err
// }
// return ConvertImage(runFile, modelDir, imagePath)
// }
// ConvertImage invokes the RapidOcrOnnx binary at runFile on a single image
// and returns the cleaned-up recognized text (see ExtractText).
// modelDir must contain the det/rec/cls ONNX models and the key file named
// in the arguments below.
func ConvertImage(runFile string, modelDir string, imagePath string) (string, error) {
	// Build the command line.
	cmdArgs := []string{
		runFile,
		"--models", modelDir,
		"--det", "ch_PP-OCRv4_det_infer-v7.onnx",
		"--rec", "ch_PP-OCRv4_rec_infer-v7.onnx",
		"--cls", "ch_ppocr_mobile_v2.0_cls_infer.onnx",
		"--keys", "ppocr_keys_v1.txt",
		"--image", imagePath,
		"--numThread", fmt.Sprintf("%d", runtime.NumCPU()),
		"--padding", "50",
		"--maxSideLen", "1024",
		"--boxScoreThresh", "0.5",
		"--boxThresh", "0.3",
		"--unClipRatio", "1.6",
		"--doAngle", "1",
		"--mostAngle", "1",
		"--GPU", "-1", // -1 selects CPU-only inference
	}
	// Print the full command line about to be executed (debug aid).
	cmdStr := strings.Join(cmdArgs, " ")
	fmt.Printf("Executing command: %s\n", cmdStr)
	// Construct and run the command, capturing both output streams.
	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
	var out bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout = &out    // capture stdout
	cmd.Stderr = &stderr // capture stderr
	// Run the command.
	err := cmd.Run()
	if err != nil {
		// Log the failure together with whatever the tool wrote to stderr.
		log.Printf("执行命令时出错: %v, stderr: %s", err, stderr.String())
		return "", err
	}
	// Reduce the tool's banner/timing output to just the recognized text.
	outputStr := out.String()
	resText, err := ExtractText(outputStr)
	if err != nil {
		log.Printf("提取文本时出错: %v", err)
		return "", err
	}
	return resText, err
}
// Compiled once at package scope instead of per call (ExtractText can run
// once per OCRed image).
var (
	// Lines consisting solely of stray OCR artifacts: ?, B, </>, or :.
	artifactLineRe = regexp.MustCompile(`(?m)^\s*(?:\?|\s*B|</>|:)\s*$`)
	// One or more consecutive blank lines.
	blankRunRe = regexp.MustCompile(`\n\s*\n`)
)

// ExtractText reduces RapidOcrOnnx's raw stdout to the recognized text: it
// takes everything after the "=====End detect=====" banner, drops the timing
// line that follows it, removes artifact-only lines, and collapses runs of
// blank lines.
func ExtractText(output string) (string, error) {
	const marker = "=====End detect====="
	endDetectIndex := strings.Index(output, marker)
	if endDetectIndex == -1 {
		return "", fmt.Errorf("expected '=====End detect=====' not found in output")
	}
	contentStartIndex := endDetectIndex + len(marker+"\n")
	if contentStartIndex >= len(output) {
		return "", fmt.Errorf("unexpected end of output after '=====End detect====='")
	}
	tempContent := output[contentStartIndex:]
	// Drop the first line (the "FullDetectTime(...)" timing line). Guard the
	// split result: the original indexed [1] unconditionally and panicked
	// whenever no newline followed that line.
	parts := strings.SplitN(tempContent, "\n", 2)
	if len(parts) < 2 {
		return "", fmt.Errorf("unexpected end of output after '=====End detect====='")
	}
	cleanedContent := strings.TrimSpace(parts[1])
	cleanedOutput := artifactLineRe.ReplaceAllString(cleanedContent, "")
	cleanedOutput = blankRunRe.ReplaceAllString(cleanedOutput, "\n")
	return cleanedOutput, nil
}

136
godo/ai/convert/libs/xpdf.go

@ -1,136 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package libs
import (
"bytes"
"errors"
"fmt"
"godo/libs"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
)
// RunXpdf renders every page of the PDF at pdfPath to monochrome PNGs with
// the pdftopng tool, extracts embedded images into the cache directory via
// GetImages, OCRs the rendered pages with RapidOCR, and returns the combined
// recognized text.
func RunXpdf(pdfPath string) (string, error) {
	tempDir, err := GetTempDir("xpdf-dirs")
	if err != nil {
		return "", err
	}
	// Remove the page-image directory on every exit path. The original only
	// registered this defer after OCR succeeded, leaking tempDir whenever an
	// earlier step returned an error.
	defer func() {
		if err := os.RemoveAll(tempDir); err != nil {
			log.Printf("Error removing temp dir: %s", err)
		}
	}()
	// pdftopng expects a trailing separator on its output prefix.
	tempDirSlash := tempDir
	if !strings.HasSuffix(tempDir, string(filepath.Separator)) {
		tempDirSlash = tempDir + string(filepath.Separator)
	}
	runFile, err := getXpdfDir("pdftopng")
	if err != nil {
		return "", err
	}
	cmdArgs := []string{
		runFile,
		"-mono",
		pdfPath,
		tempDirSlash,
	}
	log.Printf("Executing command: %s\n", strings.Join(cmdArgs, " "))
	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr // capture stderr for diagnostics
	if err = cmd.Run(); err != nil {
		log.Printf("执行命令时出错: %v, stderr: %s", err, stderr.String())
		return "", err
	}
	// Dump embedded images to the cache dir (side effect; pages themselves
	// are OCRed from tempDir below).
	if err = GetImages(pdfPath); err != nil {
		log.Println("Failed to get images:", err)
		return "", err
	}
	entries, err := os.ReadDir(tempDir)
	if err != nil {
		log.Println("Failed to read directory:", err)
		return "", err
	}
	imagePaths := make([]string, 0, len(entries))
	for _, entry := range entries {
		imagePaths = append(imagePaths, filepath.Join(tempDir, entry.Name()))
	}
	if len(imagePaths) < 1 {
		return "", errors.New("no images found")
	}
	text, err := RunRapid(imagePaths)
	if err != nil {
		log.Println("Failed to run rapid:", err)
		return "", err
	}
	return text, nil
}
// GetImages runs xpdf's pdfimages tool against pdfPath, writing any
// embedded images (as JPEG where possible, via -j) into the cache dir.
func GetImages(pdfPath string) error {
	outDir := libs.GetCacheDir()
	// pdfimages uses the output argument as a filename prefix; a trailing
	// separator keeps the files inside the directory.
	if !strings.HasSuffix(outDir, string(filepath.Separator)) {
		outDir += string(filepath.Separator)
	}
	bin, err := getXpdfDir("pdfimages")
	if err != nil {
		return err
	}
	cmd := exec.Command(bin, "-j", pdfPath, outDir)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr // capture diagnostics for the log
	if err := cmd.Run(); err != nil {
		log.Printf("执行命令时出错: %v, stderr: %s", err, stderr.String())
		return fmt.Errorf("failed to run pdfimages: %w", err)
	}
	return nil
}

113
godo/ai/convert/main.go

@ -1,113 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
"fmt"
"os"
"path"
"strings"
)
// maxBytes caps how much data a conversion may read (1 GiB).
// NOTE(review): not referenced within this chunk — presumably enforced by a
// converter elsewhere in the package; confirm before relying on it.
const maxBytes = 1024 << 20 // 1GB

// Res is the outcome of a conversion: Status 0 means success and Data holds
// the extracted text; a non-zero Status carries an error message in Data.
type Res struct {
	Status int
	Data   string
}
// Convert dispatches on the file extension and converts the file's content
// to plain text. Supported types: .doc, .docx, .odt, .pdf, .csv, .xls,
// .xlsx, .tsv, .pptx, .rtf, .epub, .xml, .xhtml, .html, .htm, common image
// formats, .txt and .md. Filenames beginning with "http" are delegated to
// ConvertHttp.
//
// Status codes in the returned Res: 0 success, 201 file could not be
// opened, 204 conversion failed. Unrecognized extensions return Status 0
// with empty Data (existing callers rely on this).
//
// BUG FIX: the conversion-failure branch previously reported
// "error opening file", copy-pasted from the open branch.
func Convert(filename string) Res {
	// URLs are handled by the HTTP converter directly.
	if strings.HasPrefix(filename, "http") {
		return ConvertHttp(filename)
	}
	r, err := os.Open(filename)
	if err != nil {
		return Res{
			Status: 201,
			Data:   fmt.Sprintf("error opening file: %v", err),
		}
	}
	// Ensure the file is closed before returning.
	defer r.Close()
	ext := strings.ToLower(path.Ext(filename))
	var body string
	switch ext {
	case ".doc":
		body, err = ConvertDoc(r)
	case ".docx":
		body, err = ConvertDocx(r)
	case ".odt":
		body, err = ConvertODT(r)
	// .pages files are not supported yet
	// case ".pages":
	// 	return "application/vnd.apple.pages"
	case ".pdf":
		body, err = ConvertPDF(r)
	case ".csv", ".xls", ".xlsx", ".tsv":
		body, err = ConvertXlsx(r)
	case ".pptx":
		body, err = ConvertPptx(r)
	case ".rtf":
		body, err = ConvertRTF(r)
	case ".epub":
		body, err = ConvetEpub(r)
	case ".xml":
		body, err = ConvertXML(r)
	case ".xhtml", ".html", ".htm":
		body, err = ConvertHTML(r)
	case ".jpg", ".jpeg", ".jpe", ".jfif", ".jfif-tbnl", ".png", ".gif", ".bmp", ".webp", ".tif", ".tiff":
		body, err = ConvertImage(r)
	case ".md":
		body, err = ConvertMd(r)
	case ".txt":
		body, err = ConvertTxt(r)
	}
	if err != nil {
		return Res{
			Status: 204,
			Data:   fmt.Sprintf("error converting file: %v", err),
		}
	}
	return Res{
		Status: 0,
		Data:   body,
	}
}

114
godo/ai/convert/main_test.go

@ -1,114 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
"fmt"
"path/filepath"
"testing"
)
// TestConvert runs Convert over one fixture per supported format (plus a
// live HTTP URL) and checks only the status code; Data is not compared
// because the extracted text is fixture-dependent.
//
// BUG FIX: the debug print previously labeled the filename as "res",
// which was misleading; it now reports case name, filename and status.
func TestConvert(t *testing.T) {
	tempDir := "./testdata"
	cases := []struct {
		name        string
		filename    string
		expectedRes Res
	}{
		{
			name:        "HTTP",
			filename:    "https://www.baidu.com",
			expectedRes: Res{Status: 0, Data: ""},
		},
		{
			name:        "docx",
			filename:    filepath.Join(tempDir, "test.docx"),
			expectedRes: Res{Status: 0, Data: ""},
		},
		{
			name:        "xls",
			filename:    filepath.Join(tempDir, "test.xls"),
			expectedRes: Res{Status: 0, Data: ""},
		},
		{
			name:        "pdf",
			filename:    filepath.Join(tempDir, "test.pdf"),
			expectedRes: Res{Status: 0, Data: ""},
		},
		{
			name:        "pptx",
			filename:    filepath.Join(tempDir, "test.pptx"),
			expectedRes: Res{Status: 0, Data: ""},
		},
		{
			name:        "rtf",
			filename:    filepath.Join(tempDir, "test.rtf"),
			expectedRes: Res{Status: 0, Data: ""},
		},
		{
			name:        "odt",
			filename:    filepath.Join(tempDir, "test.odt"),
			expectedRes: Res{Status: 0, Data: ""},
		},
		{
			name:        "txt",
			filename:    filepath.Join(tempDir, "test.txt"),
			expectedRes: Res{Status: 0, Data: ""},
		},
		{
			name:        "md",
			filename:    filepath.Join(tempDir, "test.md"),
			expectedRes: Res{Status: 0, Data: ""},
		},
		{
			name:        "html",
			filename:    filepath.Join(tempDir, "test.html"),
			expectedRes: Res{Status: 0, Data: ""},
		},
		{
			name:        "jpg",
			filename:    filepath.Join(tempDir, "test.jpg"),
			expectedRes: Res{Status: 0, Data: ""},
		},
		{
			name:        "xml",
			filename:    filepath.Join(tempDir, "test.xml"),
			expectedRes: Res{Status: 0, Data: ""},
		},
		{
			name:        "epub",
			filename:    filepath.Join(tempDir, "test.epub"),
			expectedRes: Res{Status: 0, Data: ""},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			res := Convert(tc.filename)
			fmt.Printf("case %s (%s): status=%d\n", tc.name, tc.filename, res.Status)
			if res.Status != tc.expectedRes.Status {
				t.Errorf("For case '%s', expected status %d, got %d", tc.name, tc.expectedRes.Status, res.Status)
			}
			// Data comparison is intentionally omitted; adjust if fixtures
			// ever carry stable expected text.
		})
	}
}

42
godo/ai/convert/md.go

@ -1,42 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
"io"
"regexp"
"strings"
)
func ConvertMd(r io.Reader) (string, error) {
b, err := io.ReadAll(r)
if err != nil {
return "", err
}
re := regexp.MustCompile(`<[^>]*>`)
content := re.ReplaceAllString(string(b), "")
reMarkdown := regexp.MustCompile(`(\*{1,4}|_{1,4}|\#{1,6})`)
content = reMarkdown.ReplaceAllString(content, "")
// 移除换行符
content = strings.ReplaceAll(content, "\r", "")
content = strings.ReplaceAll(content, "\n", "")
// 移除多余的空格
content = strings.TrimSpace(content)
return content, nil
}

39
godo/ai/convert/pdf.go

@ -1,39 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
"io"
"godo/ai/convert/libs"
)
// ConvertPDF extracts text from a PDF by spooling the reader to a temp
// file (xpdf needs a path on disk) and running the xpdf/OCR pipeline.
//
// BUG FIX: the temp file was previously closed only on the success path,
// leaking it whenever RunXpdf failed; the close is now deferred.
func ConvertPDF(r io.Reader) (string, error) {
	absFilePath, tmpfile, err := libs.GetTempFile(r, "prefix-pdf")
	if err != nil {
		return "", err
	}
	defer libs.CloseTempFile(tmpfile)
	output, err := libs.RunXpdf(absFilePath)
	if err != nil {
		return "", err
	}
	return output, nil
}

89
godo/ai/convert/pptx.go

@ -1,89 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
"archive/zip"
"bytes"
"fmt"
"io"
"os"
"strings"
)
// ConvertPptx converts an MS PowerPoint pptx file to text.
//
// When r is file-like (io.ReaderAt + Stat) the zip archive is read in
// place; otherwise the whole stream is buffered in memory first.
//
// BUG FIX: a failure in io.ReadAll previously returned ("", nil),
// silently swallowing the error; it is now propagated.
func ConvertPptx(r io.Reader) (string, error) {
	var size int64
	var ra io.ReaderAt
	if f, ok := r.(interface {
		io.ReaderAt
		Stat() (os.FileInfo, error)
	}); ok {
		si, err := f.Stat()
		if err != nil {
			return "", err
		}
		size = si.Size()
		ra = f
	} else {
		b, err := io.ReadAll(r)
		if err != nil {
			return "", err
		}
		size = int64(len(b))
		ra = bytes.NewReader(b)
	}
	zr, err := zip.NewReader(ra, size)
	if err != nil {
		return "", fmt.Errorf("could not unzip: %v", err)
	}
	zipFiles := mapZipFiles(zr.File)
	contentTypeDefinition, err := getContentTypeDefinition(zipFiles["[Content_Types].xml"])
	if err != nil {
		return "", err
	}
	// Collect the text of every slide / diagram part listed in the
	// content-type manifest.
	var textBody strings.Builder
	for _, override := range contentTypeDefinition.Overrides {
		f := zipFiles[override.PartName]
		switch override.ContentType {
		case "application/vnd.openxmlformats-officedocument.presentationml.slide+xml",
			"application/vnd.openxmlformats-officedocument.drawingml.diagramData+xml":
			body, err := parseDocxText(f)
			if err != nil {
				return "", fmt.Errorf("could not parse pptx: %v", err)
			}
			textBody.WriteString(body)
			textBody.WriteString("\n")
		}
	}
	// Best-effort image extraction; a failure here does not abort the
	// text conversion.
	images, err := findImagesInZip(zr)
	if err != nil {
		fmt.Printf("Error extracting images: %v", err)
	}
	fmt.Printf("Images: %v", images)
	return strings.TrimSuffix(textBody.String(), "\n"), nil
}

521
godo/ai/convert/rtf.go

@ -1,521 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
"bytes"
"errors"
"fmt"
"io"
"strconv"
"strings"
"time"
"github.com/EndFirstCorp/peekingReader"
)
// stack is a minimal LIFO of strings, backed by a singly linked list.
// The zero value is an empty, ready-to-use stack.
type stack struct {
	top  *element
	size int
}

// element is a single linked-list node of a stack.
type element struct {
	value string
	next  *element
}

// Len reports how many values are currently on the stack.
func (s *stack) Len() int { return s.size }

// Push places value on top of the stack.
func (s *stack) Push(value string) {
	s.top = &element{value: value, next: s.top}
	s.size++
}

// Peek returns the top value without removing it, or "" when empty.
func (s *stack) Peek() string {
	if s.size == 0 {
		return ""
	}
	return s.top.value
}

// Pop removes and returns the top value, or "" when empty.
func (s *stack) Pop() string {
	if s.size == 0 {
		return ""
	}
	node := s.top
	s.top = node.next
	s.size--
	return node.value
}
// ConvertRTF converts an io.Reader containing RTF data into plain text.
//
// FIX: the return conversion string(text.Bytes()) is replaced with the
// idiomatic text.String() (staticcheck S1030); behavior is identical.
func ConvertRTF(r io.Reader) (string, error) {
	pr := peekingReader.NewBufReader(r)
	var text bytes.Buffer
	var symbolStack stack
	for b, err := pr.ReadByte(); err == nil; b, err = pr.ReadByte() {
		switch b {
		case '\\':
			// Backslash starts a control word/symbol; delegate parsing.
			if err := ReadRtfControl(pr, &symbolStack, &text); err != nil {
				return "", err
			}
		case '{', '}': // group delimiters carry no text
		case '\n', '\r': // noop
		default:
			text.WriteByte(b)
		}
	}
	// NOTE(review): the loop stops on ANY read error, not just io.EOF, and
	// such errors are treated as end of input — confirm this is intended.
	return text.String(), nil
}
// ReadRtfControl consumes one RTF control sequence from r (the leading
// backslash has already been read by the caller) and appends any
// resulting plain text to text. s records the control words seen so far
// so an interrupted control's parameters can be finished later.
func ReadRtfControl(r peekingReader.Reader, s *stack, text *bytes.Buffer) error {
	control, num, err := tokenizeControl(r)
	if err != nil {
		return err
	}
	if control == "*" { // this is an extended control sequence
		// Extended (\*) groups are skipped wholesale up to their closing brace.
		err := readUntilClosingBrace(r)
		if err != nil {
			return err
		}
		if last := s.Peek(); last != "" {
			val, err := getParams(r) // last control was interrupted, so finish handling Params
			// NOTE(review): val is passed to handleParams even when getParams
			// returned an error — confirm this ordering is intentional.
			handleParams(control, val, text)
			return err
		}
		return nil
	}
	// Hex escapes ('hh) decode directly to a character.
	if isUnicode, u := getUnicode(control); isUnicode {
		text.WriteString(u)
		return nil
	}
	if control == "" {
		p, err := r.Peek(1)
		if err != nil {
			return err
		}
		if p[0] == '\\' || p[0] == '{' || p[0] == '}' { // this is an escaped character
			text.WriteByte(p[0])
			r.ReadByte()
			return nil
		}
		// A bare backslash followed by anything else is emitted as a newline.
		text.WriteByte('\n')
		return nil
	}
	if control == "binN" {
		// \binN introduces num bytes of raw binary data; skip them.
		return handleBinary(r, control, num)
	}
	// Symbol controls (e.g. dashes, quotes) map to literal text.
	if symbol, found := convertSymbol(control); found {
		text.WriteString(symbol)
	}
	val, err := getParams(r)
	if err != nil {
		return err
	}
	handleParams(control, val, text)
	s.Push(control)
	return nil
}
// tokenizeControl reads one RTF control word from r and returns its
// canonicalized name plus any numeric parameter (-1 when absent).
// "*" flags an extended sequence; hex escapes ('hh…) are returned verbatim.
// Bytes are peeked before being consumed so the first non-control byte is
// left in the reader for the caller.
//
// FIX: the error string is lowercased without terminal punctuation per Go
// convention (staticcheck ST1005); logic is otherwise unchanged.
func tokenizeControl(r peekingReader.Reader) (string, int, error) {
	var buf bytes.Buffer
	isHex := false
	numStart := -1 // index in buf where the numeric suffix begins
	for {
		p, err := r.Peek(1)
		if err != nil {
			return "", -1, err
		}
		b := p[0]
		switch {
		case b == '*' && buf.Len() == 0:
			r.ReadByte() // consume valid digit
			return "*", -1, nil
		case b == '\'' && buf.Len() == 0:
			isHex = true
			buf.WriteByte(b)
			r.ReadByte()
		case b >= '0' && b <= '9' || b == '-':
			if numStart == -1 {
				numStart = buf.Len()
			} else if numStart == 0 {
				return "", -1, errors.New("unexpected control sequence: cannot begin with digit")
			}
			buf.WriteByte(b)
			r.ReadByte() // consume valid digit
		case b >= 'a' && b <= 'z' || b >= 'A' && b <= 'Z':
			if numStart > 0 { // we've already seen alpha character(s) plus digit(s)
				c, num := canonicalize(buf.String(), numStart)
				return c, num, nil
			}
			buf.WriteByte(b)
			r.ReadByte()
		default:
			if isHex {
				return buf.String(), -1, nil
			}
			c, num := canonicalize(buf.String(), numStart)
			return c, num, nil
		}
	}
}
// canonicalize splits a control word into its alphabetic name and numeric
// parameter: the digits beginning at numStart are parsed and replaced by
// the literal suffix "N" (e.g. "fs24" -> "fsN", 24). When there is no
// valid numeric part, (control, -1) is returned unchanged.
func canonicalize(control string, numStart int) (string, int) {
	if numStart == -1 || numStart >= len(control) {
		return control, -1
	}
	value, err := strconv.Atoi(control[numStart:])
	if err != nil {
		return control, -1
	}
	return control[:numStart] + "N", value
}
// getUnicode decodes a hex-escape control of the form 'hh…: the leading
// apostrophe is followed by hex digits naming a character code, and any
// remaining bytes are carried through verbatim. It reports whether the
// control was a hex escape at all.
func getUnicode(control string) (bool, string) {
	if len(control) < 2 || control[0] != '\'' {
		return false, ""
	}
	isHexDigit := func(c byte) bool {
		return c >= '0' && c <= '9' || c >= 'a' && c <= 'f' || c >= 'A' && c <= 'F'
	}
	var digits bytes.Buffer
	for i := 1; i < len(control) && isHexDigit(control[i]); i++ {
		digits.WriteByte(control[i])
	}
	tail := control[digits.Len()+1:]
	// Parse failures are ignored (matching prior behavior): code stays 0.
	code, _ := strconv.ParseInt(digits.String(), 16, 16)
	return true, fmt.Sprintf("%c%s", code, tail)
}
// getParams reads the parameter text that follows a control word, up to
// (but not including) the next delimiter: backslash, brace, CR/LF, or
// semicolon. A trailing semicolon is consumed but not returned.
func getParams(r peekingReader.Reader) (string, error) {
	data, err := peekingReader.ReadUntilAny(r, []byte{'\\', '{', '}', '\n', '\r', ';'})
	if err != nil {
		return "", err
	}
	p, err := r.Peek(1)
	if err != nil {
		return "", err
	}
	if p[0] == ';' { // skip next if it is a semicolon
		r.ReadByte()
	}
	return string(data), nil
}
// handleBinary skips size bytes of inline binary data for a \binN
// control word; any other control is a no-op.
func handleBinary(r peekingReader.Reader, control string, size int) error {
	if control != "binN" { // not a binary run: nothing to consume
		return nil
	}
	_, err := r.ReadBytes(size)
	return err
}
// readUntilClosingBrace consumes bytes from r until the brace group the
// reader is currently inside is balanced (nested groups are tracked).
// It returns the read error if the input ends first.
func readUntilClosingBrace(r peekingReader.Reader) error {
	depth := 1
	for {
		b, err := r.ReadByte()
		if err != nil {
			return err
		}
		switch b {
		case '{':
			depth++
		case '}':
			depth--
			if depth == 0 {
				return nil
			}
		}
	}
}
func handleParams(control, param string, text *bytes.Buffer) {
if strings.HasPrefix(param, " ") {
param = param[1:]
}
if param == "" {
return
}
switch control {
// Absolution Position Tabs
// case "pindtabqc", "pindtabql", "pindtabqr", "pmartabqc", "pmartabql", "pmartabqr", "ptabldot", "ptablmdot", "ptablminus", "ptablnone", "ptabluscore":
// Associated Character Properties
// case "ab","acaps","acfN","adnN","aexpndN","afN","afsN","ai","alangN","aoutl","ascaps","ashad","astrike","aul","auld","auldb","aulnone","aulw","aupN","dbch","fcsN","hich","loch":
// Bookmarks
// case "bkmkcolfN","bkmkcollN","bkmkend","bkmkstart":
// Bullets and Numbering
// case "ilvlN","listtext","pn ","pnacross ","pnaiu","pnaiud","pnaiueo","pnaiueod","pnb ","pnbidia","pnbidib","pncaps ","pncard ","pncfN ","pnchosung","pncnum","pndbnum","pndbnumd","pndbnumk","pndbnuml","pndbnumt","pndec ","pndecd","pnfN ","pnfsN ","pnganada","pngbnum","pngbnumd","pngbnumk","pngbnuml","pnhang ","pni ","pnindentN ","pniroha","pnirohad","pnlcltr ","pnlcrm ","pnlvlblt ","pnlvlbody ","pnlvlcont ","pnlvlN ","pnnumonce ","pnord ","pnordt ","pnprev ","pnqc ","pnql ","pnqr ","pnrestart ","pnscaps ","pnspN ","pnstartN ","pnstrike ","pntext ","pntxta ","pntxtb ","pnucltr ","pnucrm ","pnul ","pnuld ","pnuldash","pnuldashd","pnuldashdd","pnuldb ","pnulhair","pnulnone ","pnulth","pnulw ","pnulwave","pnzodiac","pnzodiacd","pnzodiacl":
// Character Borders and Shading
// case "chbgbdiag","chbgcross","chbgdcross","chbgdkbdiag","chbgdkcross","chbgdkdcross","chbgdkfdiag","chbgdkhoriz","chbgdkvert","chbgfdiag","chbghoriz","chbgvert","chbrdr","chcbpatN","chcfpatN","chshdngN":
// Character Revision Mark Properties
// case "crauthN","crdateN","deleted","mvauthN ","mvdateN ","mvf","mvt","revauthdelN","revauthN ","revdttmdelN","revdttmN ","revised":
// Character Set
// case "ansi","ansicpgN","fbidis","mac","pc","pca","impr","striked1":
// Code Page Support
// case "cpgN":
// Color Scheme Mapping
// case "colorschememapping":
// Color Table
// case "blueN","caccentfive","caccentfour","caccentone","caccentsix","caccentthree","caccenttwo","cbackgroundone","cbackgroundtwo","cfollowedhyperlink","chyperlink","cmaindarkone","cmaindarktwo","cmainlightone","cmainlighttwo","colortbl","cshadeN","ctextone","ctexttwo","ctintN","greenN","redN":
// Comments (Annotations)
// case "annotation","atnauthor","atndate ","atnicn","atnid","atnparent","atnref ","atntime","atrfend ","atrfstart ":
// Control Words Introduced by Other Microsoft Products
// case "disabled","htmlbase ","htmlrtf","htmltag","mhtmltag","protect","pwdN","urtfN":
// Custom XML Data Properties
// case "datastore":
// Custom XML Tags
// case "xmlattr","xmlattrname","xmlattrnsN","xmlattrvalue","xmlclose","xmlname","xmlnstbl","xmlopen","xmlsdttcell","xmlsdttpara","xmlsdttregular","xmlsdttrow","xmlsdttunknown","xmlnsN":
// Default Fonts
// case "adeffN","adeflangN","deffN","deflangfeN","deflangN","stshfbiN","stshfdbchN","stshfhichN","stshflochN":
// Default Properties
// case "defchp","defpap":
// Document Formatting Properties
// case "aenddoc","aendnotes","afelev","aftnbj","aftncn","aftnnalc","aftnnar","aftnnauc","aftnnchi","aftnnchosung","aftnncnum","aftnndbar","aftnndbnum","aftnndbnumd","aftnndbnumk","aftnndbnumt","aftnnganada","aftnngbnum","aftnngbnumd","aftnngbnumk","aftnngbnuml","aftnnrlc ","aftnnruc ","aftnnzodiac","aftnnzodiacd","aftnnzodiacl","aftnrestart ","aftnrstcont ","aftnsep ","aftnsepc ","aftnstartN","aftntj ","allowfieldendsel","allprot ","alntblind","annotprot ","ApplyBrkRules","asianbrkrule","autofmtoverride","background","bdbfhdr","bdrrlswsix","bookfold","bookfoldrev","bookfoldsheetsN","brdrartN","brkfrm ","cachedcolbal","ctsN","cvmme ","defformat","deftabN","dghoriginN","dghshowN","dghspaceN","dgmargin","dgsnap","dgvoriginN","dgvshowN","dgvspaceN","dntblnsbdb","doctemp","doctypeN","donotembedlingdataN","donotembedsysfontN","donotshowcomments","donotshowinsdel","donotshowmarkup","donotshowprops","enddoc","endnotes","enforceprotN","expshrtn","facingp","fchars","felnbrelev","fetN ","forceupgrade","formdisp ","formprot ","formshade ","fracwidth","fromhtmlN","fromtext","ftnalt ","ftnbj","ftncn","ftnlytwnine","ftnnalc ","ftnnar ","ftnnauc ","ftnnchi ","ftnnchosung","ftnncnum","ftnndbar","ftnndbnum","ftnndbnumd","ftnndbnumk","ftnndbnumt","ftnnganada","ftnngbnum","ftnngbnumd","ftnngbnumk","ftnngbnuml","ftnnrlc ","ftnnruc ","ftnnzodiac","ftnnzodiacd","ftnnzodiacl","ftnrestart","ftnrstcont ","ftnrstpg ","ftnsep","ftnsepc","ftnstartN","ftntj","grfdoceventsN","gutterN","gutterprl","horzdoc","htmautsp","hwelev2007","hyphauto ","hyphcaps ","hyphconsecN ","hyphhotzN","ignoremixedcontentN","ilfomacatclnupN","indrlsweleven","jcompress","jexpand","jsksu","krnprsnet","ksulangN","landscape","lchars","linestartN","linkstyles ","lnbrkrule","lnongrid","ltrdoc","lytcalctblwd","lytexcttp","lytprtmet","lyttblrtgr","makebackup","margbN","marglN","margmirror","margrN","margtN","msmcap","muser","newtblstyruls","nextfile","noafcnsttbl","nobrkwrptbl","nocolbal 
","nocompatoptions","nocxsptable","noextrasprl ","nofeaturethrottle","nogrowautofit","noindnmbrts","nojkernpunct","nolead","nolnhtadjtbl","nospaceforul","notabind ","notbrkcnstfrctbl","notcvasp","notvatxbx","nouicompat","noultrlspc","noxlattoyen","ogutterN","oldas","oldlinewrap","otblrul ","paperhN","paperwN","pgbrdrb","pgbrdrfoot","pgbrdrhead","pgbrdrl","pgbrdroptN","pgbrdrr","pgbrdrsnap","pgbrdrt","pgnstartN","prcolbl ","printdata ","private","protlevelN","psover","pszN ","readonlyrecommended","readprot","relyonvmlN","remdttm","rempersonalinfo","revbarN","revisions","revpropN","revprot ","rtldoc","rtlgutter","saveinvalidxml","saveprevpict","showplaceholdtextN","showxmlerrorsN","snaptogridincell","spltpgpar","splytwnine","sprsbsp","sprslnsp","sprsspbf ","sprstsm","sprstsp ","stylelock","stylelockbackcomp","stylelockenforced","stylelockqfset","stylelocktheme","stylesortmethodN","subfontbysize","swpbdr ","template","themelangcsN","themelangfeN","themelangN","toplinepunct","trackformattingN","trackmovesN","transmf ","truncatefontheight","truncex","tsd","twoonone","useltbaln","usenormstyforlist","usexform","utinl","validatexmlN","vertdoc","viewbkspN","viewkindN","viewnobound","viewscaleN","viewzkN","wgrffmtfilter","widowctrl","windowcaption","wpjst","wpsp","wraptrsp ","writereservation","writereservhash","wrppunct","xform":
// Document Variables
// case "docvar":
// Drawing Object Properties
// case "hl","hlfr","hlloc","hlsrc","hrule","hsv":
// Drawing Objects
// case "do ","dobxcolumn ","dobxmargin ","dobxpage ","dobymargin ","dobypage ","dobypara ","dodhgtN ","dolock ","dpaendhol ","dpaendlN ","dpaendsol ","dpaendwN ","dparc ","dparcflipx ","dparcflipy ","dpastarthol ","dpastartlN ","dpastartsol ","dpastartwN ","dpcallout ","dpcoaccent ","dpcoaN ","dpcobestfit ","dpcoborder ","dpcodabs","dpcodbottom ","dpcodcenter ","dpcodescentN","dpcodtop ","dpcolengthN ","dpcominusx ","dpcominusy ","dpcooffsetN ","dpcosmarta ","dpcotdouble ","dpcotright ","dpcotsingle ","dpcottriple ","dpcountN ","dpellipse ","dpendgroup ","dpfillbgcbN ","dpfillbgcgN ","dpfillbgcrN ","dpfillbggrayN ","dpfillbgpal ","dpfillfgcbN ","dpfillfgcgN ","dpfillfgcrN ","dpfillfggrayN ","dpfillfgpal ","dpfillpatN ","dpgroup ","dpline ","dplinecobN ","dplinecogN ","dplinecorN ","dplinedado ","dplinedadodo ","dplinedash ","dplinedot ","dplinegrayN ","dplinehollow ","dplinepal ","dplinesolid ","dplinewN ","dppolycountN ","dppolygon ","dppolyline ","dpptxN ","dpptyN ","dprect ","dproundr ","dpshadow ","dpshadxN ","dpshadyN ","dptxbtlr","dptxbx ","dptxbxmarN ","dptxbxtext ","dptxlrtb","dptxlrtbv","dptxtbrl","dptxtbrlv","dpxN ","dpxsizeN ","dpyN ","dpysizeN ":
// East Asian Control Words
// case "cgridN","g","gcwN","gridtbl","nosectexpand","ulhair":
// Fields
// case "datafield ","date","field","fldalt ","flddirty","fldedit","fldinst","fldlock","fldpriv","fldrslt","fldtype","time","wpeqn":
case "fldrslt":
text.WriteString(param)
// File Table
// case "fidN ","file ","filetbl ","fnetwork ","fnonfilesys","fosnumN ","frelativeN ","fvaliddos ","fvalidhpfs ","fvalidmac ","fvalidntfs ":
// Font (Character) Formatting Properties
case "acccircle", "acccomma", "accdot", "accnone", "accunderdot", "animtextN", "b", "caps", "cbN", "cchsN ", "cfN", "charscalexN", "csN", "dnN", "embo", "expndN", "expndtwN ", "fittextN", "fN", "fsN", "i", "kerningN ", "langfeN", "langfenpN", "langN", "langnpN", "ltrch", "noproof", "nosupersub ", "outl", "plain", "rtlch", "scaps", "shad", "strike", "sub ", "super ", "ul", "ulcN", "uld", "uldash", "uldashd", "uldashdd", "uldb", "ulhwave", "ulldash", "ulnone", "ulth", "ulthd", "ulthdash", "ulthdashd", "ulthdashdd", "ulthldash", "ululdbwave", "ulw", "ulwave", "upN", "v", "webhidden":
text.WriteString(param)
// Font Family
// case "fjgothic","fjminchou","jis","falt ","fbiasN","fbidi","fcharsetN","fdecor","fetch","fmodern","fname","fnil","fontemb","fontfile","fonttbl","fprqN ","froman","fscript","fswiss","ftech","ftnil","fttruetype","panose":
// Footnotes
// case "footnote":
// Form Fields
// case "ffdefresN","ffdeftext","ffentrymcr","ffexitmcr","ffformat","ffhaslistboxN","ffhelptext","ffhpsN","ffl","ffmaxlenN","ffname","ffownhelpN","ffownstatN","ffprotN","ffrecalcN","ffresN","ffsizeN","ffstattext","fftypeN","fftypetxtN","formfield":
// Generator
// case "generator":
// Headers and Footers
// case "footer","footerf","footerl","footerr","header","headerf","headerl","headerr":
// Highlighting
// case "highlightN":
// Hyphenation Information
// case "chhresN","hresN":
// Index Entries
// case "bxe","ixe","pxe","rxe","txe","xe","xefN","yxe":
// Information Group
// case "author","buptim","category","comment","company","creatim","doccomm","dyN","edminsN","hlinkbase","hrN","idN","info","keywords","linkval","manager","minN","moN","nofcharsN","nofcharswsN","nofpagesN","nofwordsN","operator","printim","propname","proptypeN","revtim","secN","staticval","subject","title","userprops","vernN","versionN","yrN":
// List Levels
// case "lvltentative":
// List Table
// case "jclisttab","levelfollowN","levelindentN","leveljcN","leveljcnN","levellegalN","levelnfcN","levelnfcnN","levelnorestartN","levelnumbers","leveloldN","levelpictureN","levelpicturenosize","levelprevN","levelprevspaceN","levelspaceN","levelstartatN","leveltemplateidN","leveltext","lfolevel","list","listhybrid","listidN","listlevel","listname","listoverride","listoverridecountN","listoverrideformatN","listoverridestartat","listoverridetable","listpicture","listrestarthdnN","listsimpleN","liststyleidN","liststylename","listtable","listtemplateidN","lsN":
// Macintosh Edition Manager Publisher Objects
// case "bkmkpub","pubauto":
// Mail Merge
// case "mailmerge","mmaddfieldname","mmattach","mmblanklines","mmconnectstr","mmconnectstrdata","mmdatasource","mmdatatypeaccess","mmdatatypeexcel","mmdatatypefile","mmdatatypeodbc","mmdatatypeodso","mmdatatypeqt","mmdefaultsql","mmdestemail","mmdestfax","mmdestnewdoc 2 007","mmdestprinter","mmerrorsN","mmfttypeaddress","mmfttypebarcode","mmfttypedbcolumn","mmfttypemapped","mmfttypenull","mmfttypesalutation","mmheadersource","mmjdsotypeN","mmlinktoquery","mmmailsubject","mmmaintypecatalog","mmmaintypeemail","mmmaintypeenvelopes","mmmaintypefax","mmmaintypelabels","mmmaintypeletters","mmodso","mmodsoactiveN","mmodsocoldelimN","mmodsocolumnN","mmodsodynaddrN","mmodsofhdrN","mmodsofilter","mmodsofldmpdata","mmodsofmcolumnN","mmodsohashN","mmodsolidN","mmodsomappedname","mmodsoname","mmodsorecipdata","mmodsosort","mmodsosrc ","mmodsotable","mmodsoudl","mmodsoudldata 200 7","mmodsouniquetag","mmquery","mmreccurN","mmshowdata":
// Math
// case "macc","maccPr","maln","malnScr","margPr","margSzN","mbar","mbarPr","mbaseJc","mbegChr","mborderBox","mborderBoxPr","mbox","mboxPr","mbrkBinN","mbrkBinSubN","mbrkN","mcGpN","mcGpRuleN","mchr","mcount","mcSpN","mctrlPr","md","mdefJcN","mdeg","mdegHide","mden","mdiff","mdiffStyN","mdispdefN","mdPr","me","mendChr","meqArr","meqArrPr","mf","mfName","mfPr","mfunc","mfuncPr","mgroupChr","mgroupChrPr","mgrow","mhideBot","mhideLeft","mhideRight","mhideTop","minterSpN","mintLimN","mintraSpN","mjcN","mlim","mlimloc","mlimlow","mlimlowPr","mlimupp","mlimuppPr","mlit","mlMarginN","mm","mmath","mmathFontN","mmathPict","mmathPr","mmaxdist","mmc","mmcJc","mmcPr","mmcs","mmPr","mmr","mnary","mnaryLimN","mnaryPr","mnoBreak","mnor","mnum","mobjDist","moMath","moMathPara","moMathParaPr","mopEmu","mphant","mphantPr","mplcHide","mpos","mpostSpN","mpreSpN","mr","mrad","mradPr","mrMarginN","mrPr","mrSpN","mrSpRuleN","mscrN","msepChr","mshow","mshp","msmallFracN","msPre","msPrePr","msSub","msSubPr","msSubSup","msSubSupPr","msSup","msSupPr","mstrikeBLTR","mstrikeH","mstrikeTLBR","mstrikeV","mstyN","msub","msubHide","msup","msupHide","mtransp","mtype","mvertJc","mwrapIndentN","mwrapRightN","mzeroAsc","mzeroDesc","mzeroWid":
// Microsoft Office Outlook
// case "ebcstart","ebcend":
// Move Bookmarks
// case "mvfmf","mvfml","mvtof","mvtol":
// New Asia Control Words Created by Word
// case "horzvertN","twoinoneN":
// Objects
// case "linkself","objalias","objalignN","objattph","objautlink","objclass","objcropbN","objcroplN","objcroprN","objcroptN","objdata","object","objemb","objhN","objhtml","objicemb","objlink","objlock","objname","objocx","objpub","objscalexN","objscaleyN","objsect","objsetsize","objsub","objtime","objtransyN","objupdate ","objwN","oleclsid","result","rsltbmp","rslthtml","rsltmerge","rsltpict","rsltrtf","rslttxt":
// Paragraph Borders
// case "box","brdrb","brdrbar","brdrbtw","brdrcfN","brdrdash ","brdrdashd","brdrdashdd","brdrdashdot","brdrdashdotdot","brdrdashdotstr","brdrdashsm","brdrdb","brdrdot","brdremboss","brdrengrave","brdrframe","brdrhair","brdrinset","brdrl","brdrnil","brdrnone","brdroutset","brdrr","brdrs","brdrsh","brdrt","brdrtbl","brdrth","brdrthtnlg","brdrthtnmg","brdrthtnsg","brdrtnthlg","brdrtnthmg","brdrtnthsg","brdrtnthtnlg","brdrtnthtnmg","brdrtnthtnsg","brdrtriple","brdrwavy","brdrwavydb","brdrwN","brspN":
// Paragraph Formatting Properties
case "aspalpha", "aspnum", "collapsed", "contextualspace", "cufiN", "culiN", "curiN", "faauto", "facenter", "fafixed", "fahang", "faroman", "favar", "fiN", "hyphpar ", "indmirror", "intbl", "itapN", "keep", "keepn", "levelN", "liN", "linN", "lisaN", "lisbN", "ltrpar", "nocwrap", "noline", "nooverflow", "nosnaplinegrid", "nowidctlpar ", "nowwrap", "outlinelevelN ", "pagebb", "pard", "prauthN", "prdateN", "qc", "qd", "qj", "qkN", "ql", "qr", "qt", "riN", "rinN", "rtlpar", "saautoN", "saN", "sbautoN", "sbN", "sbys", "slmultN", "slN", "sN", "spv", "subdocumentN ", "tscbandhorzeven", "tscbandhorzodd", "tscbandverteven", "tscbandvertodd", "tscfirstcol", "tscfirstrow", "tsclastcol", "tsclastrow", "tscnecell", "tscnwcell", "tscsecell", "tscswcell", "txbxtwalways", "txbxtwfirst", "txbxtwfirstlast", "txbxtwlast", "txbxtwno", "widctlpar", "ytsN":
text.WriteString(param)
// Paragraph Group Properties
// case "pgp","pgptbl","ipgpN":
// Paragraph Revision Mark Properties
// case "dfrauthN","dfrdateN","dfrstart","dfrstop","dfrxst":
// Paragraph Shading
// case "bgbdiag","bgcross","bgdcross","bgdkbdiag","bgdkcross","bgdkdcross","bgdkfdiag","bgdkhoriz","bgdkvert","bgfdiag","bghoriz","bgvert","cbpatN","cfpatN","shadingN":
// Pictures
// case "binN","bliptagN","blipuid","blipupiN","defshp","dibitmapN","emfblip","jpegblip","macpict","nonshppict","picbmp ","picbppN ","piccropbN","piccroplN","piccroprN","piccroptN","pichgoalN","pichN","picprop","picscaled","picscalexN","picscaleyN","pict","picwgoalN","picwN","pmmetafileN","pngblip","shppict","wbitmapN","wbmbitspixelN","wbmplanesN","wbmwidthbyteN","wmetafileN":
// Positioned Objects and Frames
// case "abshN","abslock","absnoovrlpN","abswN","dfrmtxtxN","dfrmtxtyN","dropcapliN ","dropcaptN ","dxfrtextN","frmtxbtlr","frmtxlrtb","frmtxlrtbv","frmtxtbrl","frmtxtbrlv","nowrap","overlay","phcol","phmrg","phpg","posnegxN ","posnegyN ","posxc","posxi","posxl","posxN","posxo","posxr","posyb","posyc","posyil","posyin","posyN","posyout","posyt","pvmrg","pvpara","pvpg","wraparound","wrapdefault","wrapthrough","wraptight":
// Protection Exceptions
// case "protend","protstart":
// Quick Styles
// case "noqfpromote":
// Read-Only Password Protection
// case "password","passwordhash":
// Revision Marks for Paragraph Numbers and ListNum Fields
// case "pnrauthN","pnrdateN","pnrnfcN","pnrnot","pnrpnbrN","pnrrgbN","pnrstartN","pnrstopN","pnrxstN":
// RTF Version
// case "rtfN":
// Section Formatting Properties
case "adjustright", "binfsxnN", "binsxnN", "colnoN ", "colsN", "colsrN ", "colsxN", "colwN ", "dsN", "endnhere", "footeryN", "guttersxnN", "headeryN", "horzsect", "linebetcol", "linecont", "linemodN", "lineppage", "linerestart", "linestartsN", "linexN", "lndscpsxn", "ltrsect", "margbsxnN", "marglsxnN", "margmirsxn", "margrsxnN", "margtsxnN", "pghsxnN", "pgnbidia", "pgnbidib", "pgnchosung", "pgncnum", "pgncont", "pgndbnum", "pgndbnumd", "pgndbnumk", "pgndbnumt", "pgndec", "pgndecd", "pgnganada", "pgngbnum", "pgngbnumd", "pgngbnumk", "pgngbnuml", "pgnhindia", "pgnhindib", "pgnhindic", "pgnhindid", "pgnhnN ", "pgnhnsc ", "pgnhnsh ", "pgnhnsm ", "pgnhnsn ", "pgnhnsp ", "pgnid", "pgnlcltr", "pgnlcrm", "pgnrestart", "pgnstartsN", "pgnthaia", "pgnthaib", "pgnthaic", "pgnucltr", "pgnucrm", "pgnvieta", "pgnxN", "pgnyN", "pgnzodiac", "pgnzodiacd", "pgnzodiacl", "pgwsxnN", "pnseclvlN", "rtlsect", "saftnnalc", "saftnnar", "saftnnauc", "saftnnchi", "saftnnchosung", "saftnncnum", "saftnndbar", "saftnndbnum", "saftnndbnumd", "saftnndbnumk", "saftnndbnumt", "saftnnganada", "saftnngbnum", "saftnngbnumd", "saftnngbnumk", "saftnngbnuml", "saftnnrlc", "saftnnruc", "saftnnzodiac", "saftnnzodiacd", "saftnnzodiacl", "saftnrestart", "saftnrstcont", "saftnstartN", "sbkcol", "sbkeven", "sbknone", "sbkodd", "sbkpage", "sectd", "sectdefaultcl", "sectexpandN", "sectlinegridN", "sectspecifycl", "sectspecifygenN", "sectspecifyl", "sectunlocked", "sftnbj", "sftnnalc", "sftnnar", "sftnnauc", "sftnnchi", "sftnnchosung", "sftnncnum", "sftnndbar", "sftnndbnum", "sftnndbnumd", "sftnndbnumk", "sftnndbnumt", "sftnnganada", "sftnngbnum", "sftnngbnumd", "sftnngbnumk", "sftnngbnuml", "sftnnrlc", "sftnnruc", "sftnnzodiac", "sftnnzodiacd", "sftnnzodiacl", "sftnrestart", "sftnrstcont", "sftnrstpg", "sftnstartN", "sftntj", "srauthN", "srdateN", "titlepg", "vertal", "vertalb", "vertalc", "vertalj", "vertalt", "vertsect":
text.WriteString(param)
// Section Text
case "stextflowN":
text.WriteString(param)
// SmartTag Data
// case "factoidname":
// Special Characters
case "-", ":", "_", "{", "|", "}", "~", "bullet", "chatn", "chdate", "chdpa", "chdpl", "chftn", "chftnsep", "chftnsepc", "chpgn", "chtime", "column", "emdash", "emspace ", "endash", "enspace ", "lbrN", "ldblquote", "line", "lquote", "ltrmark", "page", "par", "qmspace", "rdblquote", "row", "rquote", "rtlmark", "sect", "sectnum", "softcol ", "softlheightN ", "softline ", "softpage ", "tab", "zwbo", "zwj", "zwnbo", "zwnj":
text.WriteString(param)
// Style and Formatting Restrictions
// case "latentstyles","lsdlockeddefN","lsdlockedexcept","lsdlockedN","lsdprioritydefN","lsdpriorityN","lsdqformatdefN","lsdqformatN","lsdsemihiddendefN","lsdsemihiddenN","lsdstimaxN","lsdunhideuseddefN","lsdunhideusedN":
// Style Sheet
// case "additive","alt","ctrl","fnN","keycode","sautoupd","sbasedonN","scompose","shidden","shift","slinkN","slocked","snextN","spersonal","spriorityN","sqformat","sreply","ssemihiddenN","stylesheet","styrsidN","sunhideusedN","tsN","tsrowd":
// Table Definitions
case "cell", "cellxN", "clbgbdiag", "clbgcross", "clbgdcross", "clbgdkbdiag", "clbgdkcross", "clbgdkdcross", "clbgdkfdiag", "clbgdkhor", "clbgdkvert", "clbgfdiag", "clbghoriz", "clbgvert", "clbrdrb", "clbrdrl", "clbrdrr", "clbrdrt", "clcbpatN", "clcbpatrawN", "clcfpatN", "clcfpatrawN", "cldel2007", "cldelauthN", "cldeldttmN", "cldgll", "cldglu", "clFitText", "clftsWidthN", "clhidemark", "clins", "clinsauthN", "clinsdttmN", "clmgf", "clmrg", "clmrgd", "clmrgdauthN", "clmrgddttmN", "clmrgdr", "clNoWrap", "clpadbN", "clpadfbN", "clpadflN", "clpadfrN", "clpadftN", "clpadlN", "clpadrN", "clpadtN", "clshdngN", "clshdngrawN", "clshdrawnil", "clspbN", "clspfbN", "clspflN", "clspfrN", "clspftN", "clsplit", "clsplitr", "clsplN", "clsprN", "clsptN", "cltxbtlr", "cltxlrtb", "cltxlrtbv", "cltxtbrl", "cltxtbrlv", "clvertalb", "clvertalc", "clvertalt", "clvmgf", "clvmrg", "clwWidthN", "irowbandN", "irowN", "lastrow", "ltrrow", "nestcell", "nestrow", "nesttableprops", "nonesttables", "rawclbgbdiag", "rawclbgcross", "rawclbgdcross", "rawclbgdkbdiag", "rawclbgdkcross", "rawclbgdkdcross", "rawclbgdkfdiag", "rawclbgdkhor", "rawclbgdkvert", "rawclbgfdiag", "rawclbghoriz", "rawclbgvert", "rtlrow", "tabsnoovrlp", "taprtl", "tblindN", "tblindtypeN", "tbllkbestfit", "tbllkborder", "tbllkcolor", "tbllkfont", "tbllkhdrcols", "tbllkhdrrows", "tbllklastcol", "tbllklastrow", "tbllknocolband", "tbllknorowband", "tbllkshading", "tcelld", "tdfrmtxtBottomN", "tdfrmtxtLeftN", "tdfrmtxtRightN", "tdfrmtxtTopN", "tphcol", "tphmrg", "tphpg", "tposnegxN", "tposnegyN", "tposxc", "tposxi", "tposxl", "tposxN", "tposxo", "tposxr", "tposyb", "tposyc", "tposyil", "tposyin", "tposyN", "tposyout", "tposyt", "tpvmrg", "tpvpara", "tpvpg", "trauthN", "trautofitN", "trbgbdiag", "trbgcross", "trbgdcross", "trbgdkbdiag", "trbgdkcross", "trbgdkdcross", "trbgdkfdiag", "trbgdkhor", "trbgdkvert", "trbgfdiag", "trbghoriz", "trbgvert", "trbrdrb ", "trbrdrh ", "trbrdrl ", "trbrdrr ", "trbrdrt ", "trbrdrv ", "trcbpatN", 
"trcfpatN", "trdateN", "trftsWidthAN", "trftsWidthBN", "trftsWidthN", "trgaphN", "trhdr ", "trkeep ", "trkeepfollow", "trleftN", "trowd", "trpaddbN", "trpaddfbN", "trpaddflN", "trpaddfrN", "trpaddftN", "trpaddlN", "trpaddrN", "trpaddtN", "trpadobN", "trpadofbN", "trpadoflN", "trpadofrN", "trpadoftN", "trpadolN", "trpadorN", "trpadotN", "trpatN", "trqc", "trql", "trqr", "trrhN", "trshdngN", "trspdbN", "trspdfbN", "trspdflN", "trspdfrN", "trspdftN", "trspdlN", "trspdrN", "trspdtN", "trspobN", "trspofbN", "trspoflN", "trspofrN", "trspoftN", "trspolN", "trsporN", "trspotN", "trwWidthAN", "trwWidthBN", "trwWidthN":
text.WriteString(param)
// Table of Contents Entries
case "tc", "tcfN", "tclN", "tcn ":
text.WriteString(param)
// Table Styles
// case "tsbgbdiag","tsbgcross","tsbgdcross","tsbgdkbdiag","tsbgdkcross","tsbgdkdcross","tsbgdkfdiag","tsbgdkhor","tsbgdkvert","tsbgfdiag","tsbghoriz","tsbgvert","tsbrdrb","tsbrdrdgl","tsbrdrdgr","tsbrdrh","tsbrdrl","tsbrdrr","tsbrdrr","tsbrdrt","tsbrdrv","tscbandshN","tscbandsvN","tscellcbpatN","tscellcfpatN","tscellpaddbN","tscellpaddfbN","tscellpaddflN","tscellpaddfrN","tscellpaddftN","tscellpaddlN","tscellpaddrN","tscellpaddtN","tscellpctN","tscellwidthftsN","tscellwidthN","tsnowrap","tsvertalb","tsvertalc","tsvertalt":
// Tabs
case "tbN", "tldot", "tleq", "tlhyph", "tlmdot", "tlth", "tlul", "tqc", "tqdec", "tqr", "txN":
text.WriteString(param)
// Theme Data
// case "themedata":
// Theme Font Information
// case "fbimajor","fbiminor","fdbmajor","fdbminor","fhimajor","fhiminor","flomajor","flominor":
// Track Changes
// case "revtbl ":
// Track Changes (Revision Marks)
// case "charrsidN","delrsidN","insrsidN","oldcprops","oldpprops","oldsprops","oldtprops","pararsidN","rsidN","rsidrootN","rsidtbl","sectrsidN","tblrsidN":
// Unicode RTF
// case "ucN","ud","uN","upr":
// User Protection Information
// case "protusertbl":
// Word through Word RTF for Drawing Objects (Shapes)
// case "shp","shpbottomN","shpbxcolumn","shpbxignore","shpbxmargin","shpbxpage","shpbyignore","shpbymargin","shpbypage","shpbypara","shpfblwtxtN","shpfhdrN","shpgrp","shpinst","shpleftN","shplidN","shplockanchor","shprightN","shprslt","shptopN","shptxt","shpwrkN","shpwrN","shpzN","sn","sp","sv","svb":
default:
}
}
func convertSymbol(symbol string) (string, bool) {
switch symbol {
case "bullet":
return "*", true
case "chdate", "chdpa", "chdpl":
return time.Now().Format("2005-01-02"), true
case "chtime":
return time.Now().Format("4:56 pm"), true
case "emdash", "endash":
return "-", true
case "lquote", "rquote":
return "'", true
case "ldblquote", "rdblquote":
return "\"", true
case "line", "lbrN":
return "\n", true
case "cell", "column", "emspace", "enspace", "qmspace", "nestcell", "nestrow", "page", "par", "row", "sect", "tab":
return " ", true
case "|", "~", "-", "_", ":":
return symbol, true
case "chatn", "chftn", "chftnsep", "chftnsepc", "chpgn", "sectnum", "ltrmark", "rtlmark", "zwbo", "zwj", "zwnbo", "zwnj", "softcol",
"softline", "softpage":
return "", true
default:
return "", false
}
}

31
godo/ai/convert/txt.go

@ -1,31 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
"io"
)
func ConvertTxt(r io.Reader) (string, error) {
b, err := io.ReadAll(r)
if err != nil {
return "", err
}
return string(b), nil
}

53
godo/ai/convert/url.go

@ -1,53 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
"fmt"
"io"
"net/http"
"jaytaylor.com/html2text"
)
// resErr packages an error into a non-success Res payload (status 201),
// formatting the message the same way for every failure path.
func resErr(err error) Res {
	msg := fmt.Sprintf("error opening file: %v", err)
	return Res{Status: 201, Data: msg}
}
// ConvertHttp fetches the page at url and converts its HTML body to plain
// text. On success the Res has Status 0 and Data holds the extracted text;
// any network, HTTP-status, read, or conversion failure is returned via
// resErr (Status 201).
func ConvertHttp(url string) Res {
	resp, err := http.Get(url)
	if err != nil {
		return resErr(err)
	}
	defer resp.Body.Close()
	// Reject error pages (404/500/...): previously their HTML was silently
	// converted and returned as if it were the document's content.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return resErr(fmt.Errorf("unexpected HTTP status: %s", resp.Status))
	}
	body, errRead := io.ReadAll(resp.Body)
	if errRead != nil {
		return resErr(errRead)
	}
	text, err := html2text.FromString(string(body), html2text.Options{PrettyTables: false})
	if err != nil {
		return resErr(err)
	}
	return Res{
		Status: 0,
		Data:   text,
	}
}

85
godo/ai/convert/xlsx.go

@ -1,85 +0,0 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
import (
"io"
"strings"
"godo/ai/convert/libs"
"github.com/pbnjay/grate"
_ "github.com/pbnjay/grate/simple" // tsv and csv support
_ "github.com/pbnjay/grate/xls"
_ "github.com/pbnjay/grate/xlsx"
)
// ConvertXlsx extracts the textual content of a spreadsheet (xlsx/xls/csv/tsv
// via grate). It returns the cell text twice — first joined row by row
// (tab-separated), then joined column by column — separated by a blank line,
// so downstream text search can match in either orientation.
func ConvertXlsx(r io.Reader) (string, error) {
	absFileFrom, tmpfromfile, err := libs.GetTempFile(r, "prefix-xlsx-from")
	if err != nil {
		return "", err
	}
	// defer ensures the temp file is removed on every path, including the
	// error returns below (the old code leaked it on failure).
	defer libs.CloseTempFile(tmpfromfile)

	// The old code discarded these errors; a failed Open then panicked on
	// the nil workbook.
	wb, err := grate.Open(absFileFrom)
	if err != nil {
		return "", err
	}
	defer wb.Close()
	sheets, err := wb.List()
	if err != nil {
		return "", err
	}

	var textByRow strings.Builder
	// columns[i] accumulates the cells of column i across all sheets.
	columns := make([][]string, 0)
	for _, s := range sheets {
		sheet, err := wb.Get(s)
		if err != nil {
			return "", err
		}
		for sheet.Next() {
			row := sheet.Strings()
			// Skip empty records.
			if len(row) == 0 {
				continue
			}
			textByRow.WriteString(strings.Join(row, "\t"))
			textByRow.WriteByte('\n')
			// Grow the column set in place. The old code re-allocated the
			// whole slice whenever a wider row appeared, silently dropping
			// every cell collected so far.
			for len(columns) < len(row) {
				columns = append(columns, nil)
			}
			for i, cell := range row {
				columns[i] = append(columns[i], cell)
			}
		}
	}

	// Concatenate the per-column text.
	var textByColumn strings.Builder
	for _, col := range columns {
		textByColumn.WriteString(strings.Join(col, "\n"))
		textByColumn.WriteByte('\n')
	}
	return textByRow.String() + "\n\n" + textByColumn.String(), nil
}

5
godo/ai/server/chat.go

@ -3,7 +3,6 @@ package server
import (
"encoding/json"
"godo/libs"
"log"
"net/http"
)
@ -17,8 +16,8 @@ func ChatHandler(w http.ResponseWriter, r *http.Request) {
return
}
headers, url, err := GetHeadersAndUrl(req, "chat")
log.Printf("url: %s", url)
log.Printf("headers: %v", headers)
// log.Printf("url: %s", url)
// log.Printf("headers: %v", headers)
if err != nil {
libs.ErrorMsg(w, err.Error())
return

4
godo/ai/server/llms.go

@ -69,6 +69,10 @@ func GetHeadersAndUrl(req map[string]interface{}, chattype string) (map[string]s
typeUrl = "/images/generations"
}
} else if chattype == "text2voice" {
} else if chattype == "voice2text" {
}
return headers, url + typeUrl, nil

5
godo/cmd/main.go

@ -47,11 +47,6 @@ func OsStart() {
log.Fatalf("InitOsSystem error: %v", err)
return
}
err = deps.InitDir()
if err != nil {
log.Fatalf("Init Dir error: %v", err)
return
}
webdav.InitWebdav()
router := mux.NewRouter()
router.Use(recoverMiddleware)

27
godo/deps/darwin.go

@ -1,27 +0,0 @@
//go:build darwin
/*
* GodoAI - A software focused on localizing AI applications
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package deps
import (
_ "embed"
)
//go:embed darwin.zip
var embeddedZip []byte

BIN
godo/deps/darwin/goconv/pdf/pdfimages

Binary file not shown.

BIN
godo/deps/darwin/goconv/pdf/pdftohtml

Binary file not shown.

BIN
godo/deps/darwin/goconv/pdf/pdftopng

Binary file not shown.

BIN
godo/deps/darwin/goconv/rapid/RapidOcrOnnx

Binary file not shown.

BIN
godo/deps/darwin/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx

Binary file not shown.

BIN
godo/deps/darwin/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx

Binary file not shown.

BIN
godo/deps/darwin/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx

Binary file not shown.

6623
godo/deps/darwin/goconv/rapid/models/ppocr_keys_v1.txt

File diff suppressed because it is too large

101
godo/deps/extract.go

@ -1,101 +0,0 @@
/*
* GodoAI - A software focused on localizing AI applications
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package deps
import (
"archive/zip"
"bytes"
"fmt"
"godo/libs"
"io"
"os"
"path/filepath"
"strings"
)
// InitDir ensures the AI runtime directory exists, extracting the embedded
// dependency archive into it on first run. If the directory is already
// present, nothing is done.
func InitDir() error {
	// Resolve the per-user runtime directory.
	runDir := libs.GetAiExeDir()
	if libs.PathExists(runDir) {
		return nil
	}
	if err := os.MkdirAll(runDir, 0o755); err != nil {
		return fmt.Errorf("failed to create user directory: %v", err)
	}
	if err := ExtractEmbeddedZip(runDir); err != nil {
		return fmt.Errorf("failed to extract embedded zip: %v", err)
	}
	return nil
}
// ExtractEmbeddedZip unpacks the embedded ZIP archive into exeDir,
// recreating the archive's directory structure. Hidden entries (names
// starting with ".") are skipped.
func ExtractEmbeddedZip(exeDir string) error {
	// Read the embedded ZIP data from an in-memory buffer.
	reader := bytes.NewReader(embeddedZip)
	zipReader, err := zip.NewReader(reader, int64(len(embeddedZip)))
	if err != nil {
		return fmt.Errorf("failed to create zip reader: %v", err)
	}
	for _, zipEntry := range zipReader.File {
		// Skip hidden entries such as ".DS_Store".
		if strings.HasPrefix(zipEntry.Name, ".") {
			fmt.Printf("Skipping hidden entry: %s\n", zipEntry.Name)
			continue
		}
		// Per-entry helper so file handles are closed as each entry
		// finishes; the old deferred Closes inside the loop kept every
		// handle open until the whole function returned.
		if err := extractZipEntry(exeDir, zipEntry); err != nil {
			return err
		}
	}
	fmt.Println("Embedded ZIP extracted to", exeDir)
	return nil
}

// extractZipEntry writes one archive entry (file or directory) below exeDir.
func extractZipEntry(exeDir string, zipEntry *zip.File) error {
	// Build the destination path for this entry.
	entryPath := filepath.Join(exeDir, zipEntry.Name)
	// Guard against "zip slip": an entry named "../x" must not be able to
	// escape exeDir.
	if !strings.HasPrefix(entryPath, filepath.Clean(exeDir)+string(os.PathSeparator)) {
		return fmt.Errorf("failed to create destination file: illegal entry path %q", zipEntry.Name)
	}
	// Directories are created with the archived mode and need no content.
	if zipEntry.FileInfo().IsDir() {
		if err := os.MkdirAll(entryPath, zipEntry.Mode()); err != nil {
			return fmt.Errorf("failed to create directory: %v", err)
		}
		return nil
	}
	zipFile, err := zipEntry.Open()
	if err != nil {
		return fmt.Errorf("failed to open zip file entry: %v", err)
	}
	defer zipFile.Close()
	// Ensure the parent directory exists before creating the file.
	if err := os.MkdirAll(filepath.Dir(entryPath), 0755); err != nil {
		return fmt.Errorf("failed to create parent directory: %v", err)
	}
	dstFile, err := os.OpenFile(entryPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
	if err != nil {
		return fmt.Errorf("failed to create destination file: %v", err)
	}
	defer dstFile.Close()
	if _, err := io.Copy(dstFile, zipFile); err != nil {
		return fmt.Errorf("failed to copy content to destination file: %v", err)
	}
	return nil
}

27
godo/deps/linux.go

@ -1,27 +0,0 @@
//go:build linux
/*
* GodoAI - A software focused on localizing AI applications
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package deps
import (
_ "embed"
)
//go:embed linux.zip
var embeddedZip []byte

BIN
godo/deps/linux/goconv/pdf/pdfimages

Binary file not shown.

BIN
godo/deps/linux/goconv/pdf/pdftohtml

Binary file not shown.

BIN
godo/deps/linux/goconv/pdf/pdftopng

Binary file not shown.

BIN
godo/deps/linux/goconv/rapid/RapidOcrOnnx

Binary file not shown.

BIN
godo/deps/linux/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx

Binary file not shown.

BIN
godo/deps/linux/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx

Binary file not shown.

BIN
godo/deps/linux/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx

Binary file not shown.

6623
godo/deps/linux/goconv/rapid/models/ppocr_keys_v1.txt

File diff suppressed because it is too large

27
godo/deps/windows.go

@ -1,27 +0,0 @@
//go:build windows
/*
* GodoAI - A software focused on localizing AI applications
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package deps
import (
_ "embed"
)
//go:embed windows.zip
var embeddedZip []byte

BIN
godo/deps/windows/goconv/pdf/pdfimages.exe

Binary file not shown.

BIN
godo/deps/windows/goconv/pdf/pdftohtml.exe

Binary file not shown.

BIN
godo/deps/windows/goconv/pdf/pdftopng.exe

Binary file not shown.

BIN
godo/deps/windows/goconv/rapid/RapidOcrOnnx.exe

Binary file not shown.

BIN
godo/deps/windows/goconv/rapid/models/ch_PP-OCRv4_det_infer-v7.onnx

Binary file not shown.

BIN
godo/deps/windows/goconv/rapid/models/ch_PP-OCRv4_rec_infer-v7.onnx

Binary file not shown.

BIN
godo/deps/windows/goconv/rapid/models/ch_ppocr_mobile_v2.0_cls_infer.onnx

Binary file not shown.

6623
godo/deps/windows/goconv/rapid/models/ppocr_keys_v1.txt

File diff suppressed because it is too large

201
godo/office/LICENSE

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

75
godo/office/Readme.md

@ -0,0 +1,75 @@
# 📄 Gh0ffice (Office/PDF File Parser)
## Modifications
- 2024-12-08 godoos: add support for odt/epub/xml/rtf/md/txt/html/json files
This Go-based project provides a robust parser for various office document formats, including DOCX/DOC, PPTX/PPT, XLSX/XLS, and PDF. The parser extracts both content and metadata from these file types, allowing easy access to structured document data for further processing or analysis.
## 🛠 Features
- **Metadata Extraction**: Captures essential metadata such as title, author, keywords, and modification dates.
- **Content Parsing**: Supports extraction of text content from multiple file formats.
- **Extensible Architecture**: Easily add support for new file formats by implementing additional reader functions.
## 📂 Supported Formats
- **DOCX**: Extracts text content from Word documents.
- **PPTX**: Extracts text content from PowerPoint presentations.
- **XLSX**: Extracts data from Excel spreadsheets.
- **DOC**: Extracts text content from Legacy Word documents.
- **PPT**: Extracts text content from Legacy PowerPoint presentations.
- **XLS**: Extracts data from Legacy Excel spreadsheets.
- **PDF**: Extracts text content from PDF files (note that some complex PDFs may not be fully supported).
## 📖 Installation
To use this project, ensure you have Go installed on your system. Clone this repository and run the following command to install the dependencies:
```bash
go mod tidy
```
## 🚀 Usage
### Basic Usage
You can inspect a document and extract its content and metadata by calling the `InspectDocument` function with the file path as follows:
```go
doc, err := gh0ffice.InspectDocument("path/to/your/file.docx")
if err != nil {
log.Fatalf("Error reading document: %s", err)
}
fmt.Printf("Title: %s\n", doc.Title)
fmt.Printf("Content: %s\n", doc.Content)
```
### Debugging
Set the `DEBUG` variable to `true` to enable logging for more verbose output during the parsing process:
```go
const DEBUG bool = true
```
## ⚠️ Limitations
- The PDF parsing may fail on certain complex or malformed documents.
- Only straightforward text extraction is performed; formatting and images are not considered.
- Compatibility tested primarily on major office file formats.
## 📝 License
This project is licensed under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for more details.
## 📬 Contributing
Contributions are welcome! Please feel free to create issues or submit pull requests for new features or bug fixes.
## 👥 Author
This project is maintained by the team and community of YT-Gh0st. Contributions and engagements are always welcome!
---
For any questions or suggestions, feel free to reach out. Happy parsing! 😊

27
godo/office/darwin.go

@ -0,0 +1,27 @@
//go:build darwin
// +build darwin
package office
import (
"os"
"syscall"
"time"
)
// getFileInfoData fills the basic file metadata of data (name, title, size,
// and the three timestamps) from the file at data.path.
//
// It returns (true, nil) on success and (false, err) when the stat fails.
// NOTE(review): the type assertion on fileinfo.Sys() would panic if Sys
// returned something other than *syscall.Stat_t; on darwin os.Stat is
// expected to always yield that type — confirm for exotic file systems.
func getFileInfoData(data *Document) (bool, error) {
	fileinfo, err := os.Stat(data.path)
	if err != nil {
		return false, err
	}
	data.Filename = fileinfo.Name()
	// The title defaults to the file name; callers may overwrite it later.
	data.Title = data.Filename
	data.Size = int(fileinfo.Size())
	// Darwin exposes the birth (creation) time, which the portable
	// os.FileInfo interface does not.
	stat := fileinfo.Sys().(*syscall.Stat_t)
	data.Createtime = time.Unix(stat.Birthtimespec.Sec, stat.Birthtimespec.Nsec)
	data.Modifytime = time.Unix(stat.Mtimespec.Sec, stat.Mtimespec.Nsec)
	data.Accesstime = time.Unix(stat.Atimespec.Sec, stat.Atimespec.Nsec)
	return true, nil
}

545
godo/office/doc.go

@ -0,0 +1,545 @@
package office
import (
"bytes"
"encoding/binary"
"errors"
"io"
"unicode/utf16"
"unicode/utf8"
"github.com/mattetti/filebuffer"
"github.com/richardlehane/mscfb"
)
// ---- file doc.go ----
// There were a few changes in this file to actually support Unicode which the old code was not.
var (
errTable = errors.New("cannot find table stream")
errDocEmpty = errors.New("WordDocument not found")
// errDocShort = errors.New("wordDoc block too short")
errInvalidArgument = errors.New("invalid table and/or fib")
)
type allReader interface {
io.Closer
io.ReaderAt
io.ReadSeeker
}
func wrapError(e error) error {
return errors.New("Error processing file: " + e.Error())
}
// DOC2Text converts a standard io.Reader from a Microsoft Word .doc binary
// file and returns a reader (actually a bytes.Buffer) which will output the
// plain text found in the .doc file.
//
// The input must be randomly addressable; if r does not already implement
// io.ReaderAt the whole stream is buffered in memory first.
func DOC2Text(r io.Reader) (io.Reader, error) {
	ra, ok := r.(io.ReaderAt)
	if !ok {
		// Buffer the stream so the CFB reader can seek in it.
		// BUGFIX: the original used `ra, _, err :=` here, which shadowed
		// the outer ra and left it nil for non-ReaderAt inputs.
		buf, _, err := toMemoryBuffer(r)
		if err != nil {
			return nil, wrapError(err)
		}
		defer buf.Close()
		ra = buf
	}
	// Open the OLE compound file and locate the WordDocument and table streams.
	d, err := mscfb.New(ra)
	if err != nil {
		return nil, wrapError(err)
	}
	wordDoc, table0, table1 := getWordDocAndTables(d)
	// Parse the File Information Block at the start of the WordDocument stream.
	fib, err := getFib(wordDoc)
	if err != nil {
		return nil, wrapError(err)
	}
	// The FIB says which of 0Table/1Table is the active table stream.
	table := getActiveTable(table0, table1, fib)
	if table == nil {
		return nil, wrapError(errTable)
	}
	// The Clx in the table stream holds the piece table mapping character
	// positions to stream offsets.
	clx, err := getClx(table, fib)
	if err != nil {
		return nil, wrapError(err)
	}
	return getText(wordDoc, clx)
}
// toMemoryBuffer drains r into an in-memory file buffer that supports
// ReadAt/Seek/Close, returning the buffer and the number of bytes read.
func toMemoryBuffer(r io.Reader) (allReader, int64, error) {
	var scratch bytes.Buffer
	n, err := scratch.ReadFrom(r)
	if err != nil {
		return nil, 0, err
	}
	return filebuffer.New(scratch.Bytes()), n, nil
}
// getText walks the piece table (PlcPcd) and decodes every piece of the
// document text from the WordDocument stream into a UTF-8 buffer.
// Piece i covers character positions aCP[i]..aCP[i+1] and is stored either
// as 8-bit ANSI or 16-bit Unicode depending on its fCompressed flag.
func getText(wordDoc *mscfb.File, clx *clx) (io.Reader, error) {
	var buf bytes.Buffer
	for i := 0; i < len(clx.pcdt.PlcPcd.aPcd); i++ {
		pcd := clx.pcdt.PlcPcd.aPcd[i]     // descriptor of the current piece
		cp := clx.pcdt.PlcPcd.aCP[i]       // first character position of this piece
		cpNext := clx.pcdt.PlcPcd.aCP[i+1] // first character position of the next piece
		var start, end int
		// https://msdn.microsoft.com/ko-kr/library/office/gg615596(v=office.14).aspx
		// Read the value of the Pcd.Fc.fCompressed field at bit 46 of the current Pcd structure. If 0, the Pcd structure refers to a 16-bit Unicode character. If 1, it refers to an 8-bit ANSI character.
		if pcd.fc.fCompressed {
			// 8-bit ANSI: the real stream offset is fc/2, one byte per character.
			start = pcd.fc.fc / 2
			end = start + cpNext - cp
		} else {
			// -> 16-bit Unicode characters
			start = pcd.fc.fc
			end = start + 2*(cpNext-cp)
		}
		b := make([]byte, end-start)
		_, err := wordDoc.ReadAt(b, int64(start)) // read all the characters
		if err != nil {
			return nil, err
		}
		translateText(b, &buf, pcd.fc.fCompressed)
	}
	return &buf, nil
}
// translateText translates the buffer into text. fCompressed = 0 for 16-bit Unicode, 1 = 8-bit ANSI characters.
// Decoded characters are appended to buf as UTF-8. Field instructions
// (0x13..0x14, see MS-DOC 2.8.25) are skipped, the table cell mark 0x07 is
// rendered as a space, and other control characters (except TAB/LF/CR) are
// dropped.
// NOTE(review): fieldLevel is incremented but never read, so nested fields
// are effectively flattened. Lone UTF-16 surrogate halves decode to U+FFFD
// because each 16-bit unit is decoded independently.
func translateText(b []byte, buf *bytes.Buffer, fCompressed bool) {
	u16s := make([]uint16, 1) // scratch slot for one UTF-16 code unit
	b8buf := make([]byte, 4)  // scratch for one UTF-8 encoded rune
	fieldLevel := 0
	var isFieldChar bool // true while inside a field instruction run
	for cIndex := range b {
		// Convert to rune
		var char rune
		if fCompressed {
			// ANSI, 1 byte
			char = rune(b[cIndex])
		} else {
			// 16-bit Unicode: skip every second byte
			if cIndex%2 != 0 {
				continue
			} else if (cIndex + 1) >= len(b) { // make sure there are at least 2 bytes for Unicode decoding
				continue
			}
			// convert from UTF16 to UTF8
			u16s[0] = uint16(b[cIndex]) + (uint16(b[cIndex+1]) << 8)
			r := utf16.Decode(u16s)
			if len(r) != 1 {
				//fmt.Printf("Invalid rune %v\n", r)
				continue
			}
			char = r[0]
		}
		// Handle special field characters (section 2.8.25)
		if char == 0x13 {
			// field begin mark: suppress output until the separator/end
			isFieldChar = true
			fieldLevel++
			continue
		} else if char == 0x14 {
			// field separator: what follows is the field result, keep it
			isFieldChar = false
			continue
		} else if char == 0x15 {
			// field end mark
			isFieldChar = false
			continue
		} else if isFieldChar {
			continue
		}
		if char == 7 { // table column separator
			buf.WriteByte(' ')
			continue
		} else if char < 32 && char != 9 && char != 10 && char != 13 { // skip non-printable ASCII characters
			//buf.Write([]byte(fmt.Sprintf("|%#x|", char)))
			continue
		}
		if fCompressed { // compressed, so replace compressed characters
			buf.Write(replaceCompressed(byte(char)))
		} else {
			// encode the rune to UTF-8
			n := utf8.EncodeRune(b8buf, char)
			buf.Write(b8buf[:n])
		}
	}
}
// replaceCompressed maps the Windows-1252 "special" bytes (0x82-0x9F) that
// occur in fCompressed (8-bit ANSI) pieces to their Unicode equivalents and
// returns them encoded as UTF-8, ready to append to the UTF-8 output buffer.
// Any other byte is passed through unchanged as a single byte.
//
// BUGFIX: the original emitted the mapped code point as two little-endian
// (UTF-16LE) bytes, which corrupted the UTF-8 output stream (e.g. 0x85 "…"
// came out as "& ").
func replaceCompressed(char byte) []byte {
	// Windows-1252 0x80-0x9F to Unicode mapping (the subset Word uses).
	special := map[byte]rune{
		0x82: 0x201A, 0x83: 0x0192, 0x84: 0x201E, 0x85: 0x2026,
		0x86: 0x2020, 0x87: 0x2021, 0x88: 0x02C6, 0x89: 0x2030,
		0x8A: 0x0160, 0x8B: 0x2039, 0x8C: 0x0152, 0x91: 0x2018,
		0x92: 0x2019, 0x93: 0x201C, 0x94: 0x201D, 0x95: 0x2022,
		0x96: 0x2013, 0x97: 0x2014, 0x98: 0x02DC, 0x99: 0x2122,
		0x9A: 0x0161, 0x9B: 0x203A, 0x9C: 0x0153, 0x9F: 0x0178,
	}
	if r, ok := special[char]; ok {
		return []byte(string(r)) // UTF-8 encoding of the mapped rune
	}
	return []byte{char}
}
// getWordDocAndTables scans the compound-file directory for the three
// streams the parser needs: "WordDocument", "0Table" and "1Table".
// Any stream that is absent is returned as nil.
func getWordDocAndTables(r *mscfb.Reader) (*mscfb.File, *mscfb.File, *mscfb.File) {
	var wordDoc, table0, table1 *mscfb.File
	for _, stream := range r.File {
		switch stream.Name {
		case "WordDocument":
			wordDoc = stream
		case "0Table":
			table0 = stream
		case "1Table":
			table1 = stream
		}
	}
	return wordDoc, table0, table1
}
// getActiveTable picks the table stream the FIB designates as current:
// fWhichTblStm == 0 selects 0Table, anything else selects 1Table.
func getActiveTable(table0 *mscfb.File, table1 *mscfb.File, f *fib) *mscfb.File {
	switch f.base.fWhichTblStm {
	case 0:
		return table0
	default:
		return table1
	}
}
// ---- file fib.go ----
var (
	// errFibInvalid is returned when a FIB sub-structure does not fit in
	// the bytes read from the WordDocument stream.
	errFibInvalid = errors.New("file information block validation failed")
)

// fib is the File Information Block (MS-DOC 2.5.1), the header at offset 0
// of the WordDocument stream. Only fields needed for text extraction are kept.
type fib struct {
	base       fibBase    // fixed header portion (2.5.2)
	csw        int        // size in bytes of fibRgW
	fibRgW     fibRgW     // 16-bit value block (2.5.3)
	cslw       int        // size in bytes of fibRgLw
	fibRgLw    fibRgLw    // 32-bit value block (2.5.4)
	cbRgFcLcb  int        // count preceding fibRgFcLcb
	fibRgFcLcb fibRgFcLcb // stream offset/size pairs (2.5.5)
}

// fibBase (2.5.2); only the table-stream selector is retained.
type fibBase struct {
	fWhichTblStm int // 0 -> "0Table" stream, 1 -> "1Table" stream
}

// fibRgW (2.5.3); none of its fields are needed, only its size matters.
type fibRgW struct {
}

// fibRgLw (2.5.4): character counts (ccp*) of the document's text parts,
// per the MS-DOC field naming.
type fibRgLw struct {
	ccpText    int // main document text
	ccpFtn     int // footnotes
	ccpHdd     int // headers/footers
	ccpMcr     int // macros (legacy)
	ccpAtn     int // annotations (comments)
	ccpEdn     int // endnotes
	ccpTxbx    int // text boxes
	ccpHdrTxbx int // header text boxes
	cpLength   int // derived total CP count; see getFibRgLw
}

// fibRgFcLcb (2.5.5): fc (offset) / lcb (size) pairs locating structures in
// the table stream; only the field PLCs and the Clx are retained.
type fibRgFcLcb struct {
	fcPlcfFldMom  int
	lcbPlcfFldMom int
	fcPlcfFldHdr  int
	lcbPlcfFldHdr int
	fcPlcfFldFtn  int
	lcbPlcfFldFtn int
	fcPlcfFldAtn  int
	lcbPlcfFldAtn int
	fcClx         int
	lcbClx        int
}
// parse File Information Block (section 2.5.1)
// getFib reads the first 898 bytes of the WordDocument stream (enough to
// cover FibBase through FibRgFcLcb97) and parses each sub-block in turn.
// The blocks after FibBase are variable-sized and each is prefixed by its
// own 16-bit size field, hence the running offsets below.
func getFib(wordDoc *mscfb.File) (*fib, error) {
	if wordDoc == nil {
		return nil, errDocEmpty
	}
	b := make([]byte, 898) // get FIB block up to FibRgFcLcb97
	_, err := wordDoc.ReadAt(b, 0)
	if err != nil {
		return nil, err
	}
	fibBase := getFibBase(b[0:32])
	// csw sits at offset 32; FibRgW follows it.
	fibRgW, csw, err := getFibRgW(b, 32)
	if err != nil {
		return nil, err
	}
	// 34 = 32-byte FibBase + 2-byte csw field.
	fibRgLw, cslw, err := getFibRgLw(b, 34+csw)
	if err != nil {
		return nil, err
	}
	fibRgFcLcb, cbRgFcLcb, err := getFibRgFcLcb(b, 34+csw+2+cslw)
	return &fib{base: *fibBase, csw: csw, cslw: cslw, fibRgW: *fibRgW, fibRgLw: *fibRgLw, fibRgFcLcb: *fibRgFcLcb, cbRgFcLcb: cbRgFcLcb}, err
}
// parse FibBase (section 2.5.2)
// getFibBase extracts the single flag this parser needs: which of the two
// table streams (0Table/1Table) holds the current piece table.
func getFibBase(fib []byte) *fibBase {
	const flagsByte = 11 // byte holding fWhichTblStm
	// fWhichTblStm is the second-lowest bit of this byte.
	selector := (fib[flagsByte] >> 1) & 1
	return &fibBase{fWhichTblStm: int(selector)}
}
// parse FibRgW (section 2.5.3)
// getFibRgW reads only csw — the size of the FibRgW block, expressed as a
// count of 16-bit values — and converts it to bytes. None of the FibRgW
// fields themselves are needed, so an empty struct is returned.
func getFibRgW(fib []byte, start int) (*fibRgW, int, error) {
	// BUGFIX: need start+2 <= len(fib) to slice two bytes; the original
	// `>=` rejected the valid case where csw ends exactly at the buffer end.
	if start+2 > len(fib) { // must be big enough for csw
		return &fibRgW{}, 0, errFibInvalid
	}
	csw := int(binary.LittleEndian.Uint16(fib[start:start+2])) * 2 // in bytes
	return &fibRgW{}, csw, nil
}
// parse FibRgLw (section 2.5.4)
// getFibRgLw reads the ccp* character counts from fixed 4-byte slots inside
// FibRgLw97 and derives cpLength, the total character-position count used
// later to validate the piece table. Returns the struct plus cslw (the byte
// size of the block) so the caller can locate the FibRgFcLcb that follows.
// NOTE(review): `>=` rejects the valid boundary where the block ends exactly
// at len(fib); harmless with the fixed 898-byte buffer getFib passes in.
func getFibRgLw(fib []byte, start int) (*fibRgLw, int, error) {
	fibRgLwStart := start + 2 // skip cslw
	if fibRgLwStart+88 >= len(fib) { // expect 88 bytes in fibRgLw
		return &fibRgLw{}, 0, errFibInvalid
	}
	cslw := getInt16(fib, start) * 4 // in bytes
	// ccp* values occupy 4-byte slots starting at slot 3 of FibRgLw97.
	ccpText := getInt(fib, fibRgLwStart+3*4)
	ccpFtn := getInt(fib, fibRgLwStart+4*4)
	ccpHdd := getInt(fib, fibRgLwStart+5*4)
	ccpMcr := getInt(fib, fibRgLwStart+6*4)
	ccpAtn := getInt(fib, fibRgLwStart+7*4)
	ccpEdn := getInt(fib, fibRgLwStart+8*4)
	ccpTxbx := getInt(fib, fibRgLwStart+9*4)
	ccpHdrTxbx := getInt(fib, fibRgLwStart+10*4)
	// calculate cpLength. Used in PlcPcd verification (see section 2.8.35)
	var cpLength int
	if ccpFtn != 0 || ccpHdd != 0 || ccpMcr != 0 || ccpAtn != 0 || ccpEdn != 0 || ccpTxbx != 0 || ccpHdrTxbx != 0 {
		cpLength = ccpFtn + ccpHdd + ccpMcr + ccpAtn + ccpEdn + ccpTxbx + ccpHdrTxbx + ccpText + 1
	} else {
		cpLength = ccpText
	}
	return &fibRgLw{ccpText: ccpText, ccpFtn: ccpFtn, ccpHdd: ccpHdd, ccpMcr: ccpMcr, ccpAtn: ccpAtn,
		ccpEdn: ccpEdn, ccpTxbx: ccpTxbx, ccpHdrTxbx: ccpHdrTxbx, cpLength: cpLength}, cslw, nil
}
// parse FibRgFcLcb (section 2.5.5)
// getFibRgFcLcb extracts the fc/lcb pairs this parser needs (the field PLCs
// and the Clx) from the FibRgFcLcb97 block of 186 32-bit values.
func getFibRgFcLcb(fib []byte, start int) (*fibRgFcLcb, int, error) {
	fibRgFcLcbStart := start + 2 // skip cbRgFcLcb
	// BUGFIX: the original comparison was inverted (`< len(fib)` returned
	// an error), so big-enough buffers were rejected while truncated ones
	// slipped through to panic in getInt. FibRgFcLcb97 holds 186 values;
	// the whole region must fit inside the buffer.
	if fibRgFcLcbStart+186*4 > len(fib) {
		return &fibRgFcLcb{}, 0, errFibInvalid
	}
	cbRgFcLcb := getInt16(fib, start)
	// Field PLC offsets/sizes (slots 32-39) and the Clx (slots 66-67).
	fcPlcfFldMom := getInt(fib, fibRgFcLcbStart+32*4)
	lcbPlcfFldMom := getInt(fib, fibRgFcLcbStart+33*4)
	fcPlcfFldHdr := getInt(fib, fibRgFcLcbStart+34*4)
	lcbPlcfFldHdr := getInt(fib, fibRgFcLcbStart+35*4)
	fcPlcfFldFtn := getInt(fib, fibRgFcLcbStart+36*4)
	lcbPlcfFldFtn := getInt(fib, fibRgFcLcbStart+37*4)
	fcPlcfFldAtn := getInt(fib, fibRgFcLcbStart+38*4)
	lcbPlcfFldAtn := getInt(fib, fibRgFcLcbStart+39*4)
	fcClx := getInt(fib, fibRgFcLcbStart+66*4)
	lcbClx := getInt(fib, fibRgFcLcbStart+67*4)
	return &fibRgFcLcb{fcPlcfFldMom: fcPlcfFldMom, lcbPlcfFldMom: lcbPlcfFldMom, fcPlcfFldHdr: fcPlcfFldHdr, lcbPlcfFldHdr: lcbPlcfFldHdr,
		fcPlcfFldFtn: fcPlcfFldFtn, lcbPlcfFldFtn: lcbPlcfFldFtn, fcPlcfFldAtn: fcPlcfFldAtn, lcbPlcfFldAtn: lcbPlcfFldAtn,
		fcClx: fcClx, lcbClx: lcbClx}, cbRgFcLcb, nil
}
// getInt16 reads an unsigned little-endian 16-bit value at offset start.
func getInt16(buf []byte, start int) int {
	lo, hi := buf[start], buf[start+1]
	return int(uint16(lo) | uint16(hi)<<8)
}

// getInt reads an unsigned little-endian 32-bit value at offset start.
func getInt(buf []byte, start int) int {
	v := uint32(buf[start]) |
		uint32(buf[start+1])<<8 |
		uint32(buf[start+2])<<16 |
		uint32(buf[start+3])<<24
	return int(v)
}
// ---- file clx.go ----
var (
	errInvalidPrc  = errors.New("invalid Prc structure")
	errInvalidClx  = errors.New("expected last aCP value to equal fib.cpLength (2.8.35)")
	errInvalidPcdt = errors.New("expected clxt to be equal 0x02")
)

// clx is the parsed Clx structure (MS-DOC 2.9.38): zero or more Prc
// entries followed by a Pcdt. Only the Pcdt (piece table) is retained.
type clx struct {
	pcdt pcdt
}

// pcdt (MS-DOC 2.9.178) wraps the piece table.
type pcdt struct {
	lcb    int    // byte count of the PlcPcd that follows
	PlcPcd plcPcd // the piece table itself
}

// plcPcd (MS-DOC 2.8.35): n+1 character positions bracketing n pieces.
type plcPcd struct {
	aCP  []int // aCP[i]..aCP[i+1] is the CP range of piece i
	aPcd []pcd // piece descriptors, one per piece
}

// pcd (MS-DOC 2.9.177); only the FcCompressed portion is retained.
type pcd struct {
	fc fcCompressed
}

// fcCompressed (MS-DOC 2.9.73): WordDocument stream offset plus the flag
// saying whether the piece is 8-bit ANSI (true) or 16-bit Unicode (false).
type fcCompressed struct {
	fc          int
	fCompressed bool
}
// read Clx (section 2.9.38)
// getClx reads the Clx bytes from the table stream, skips the leading Prc
// array to find the Pcdt, parses the piece table, and validates that the
// last character position matches the total CP count computed from the FIB.
func getClx(table *mscfb.File, fib *fib) (*clx, error) {
	if table == nil || fib == nil {
		return nil, errInvalidArgument
	}
	b, err := readClx(table, fib)
	if err != nil {
		return nil, err
	}
	// The Pcdt starts right after the (possibly empty) RgPrc array.
	pcdtOffset, err := getPrcArrayEnd(b)
	if err != nil {
		return nil, err
	}
	pcdt, err := getPcdt(b, pcdtOffset)
	if err != nil {
		return nil, err
	}
	// Per 2.8.35 the final aCP entry must equal the document's CP length.
	if pcdt.PlcPcd.aCP[len(pcdt.PlcPcd.aCP)-1] != fib.fibRgLw.cpLength {
		return nil, errInvalidClx
	}
	return &clx{pcdt: *pcdt}, nil
}
// readClx copies the raw Clx bytes out of the table stream, using the
// offset (fcClx) and size (lcbClx) recorded in the FIB.
func readClx(table *mscfb.File, fib *fib) ([]byte, error) {
	raw := make([]byte, fib.fibRgFcLcb.lcbClx)
	if _, err := table.ReadAt(raw, int64(fib.fibRgFcLcb.fcClx)); err != nil {
		return nil, err
	}
	return raw, nil
}
// read Pcdt from Clx (section 2.9.178)
// getPcdt parses the Pcdt at pcdtOffset: a 0x02 tag byte, a 32-bit lcb, and
// then the PlcPcd — numPcds+1 character positions followed by numPcds
// 8-byte Pcd structures.
//
// BUGFIX: bounds checks now use `>` consistently (the original's `>=` on
// the CP reads rejected a PlcPcd that ends exactly at the buffer end), and
// lcb is validated so a corrupt value can no longer make numPcds negative
// and panic the make() below.
func getPcdt(clx []byte, pcdtOffset int) (*pcdt, error) {
	const pcdSize = 8
	if pcdtOffset < 0 || pcdtOffset+5 > len(clx) {
		return nil, errInvalidPcdt
	}
	if clx[pcdtOffset] != 0x02 { // clxt must be 0x02 or invalid
		return nil, errInvalidPcdt
	}
	lcb := int(binary.LittleEndian.Uint32(clx[pcdtOffset+1 : pcdtOffset+5])) // skip clxt, get lcb
	if lcb < 4 {
		// lcb counts the PlcPcd bytes; anything under the 4-byte minimum
		// (one CP, zero pieces) is corrupt.
		return nil, errInvalidPcdt
	}
	plcPcdOffset := pcdtOffset + 5           // skip clxt and lcb
	numPcds := (lcb - 4) / (4 + pcdSize)     // see 2.2.2 in the spec for equation
	numCps := numPcds + 1                    // always 1 more cp than pcds
	cps := make([]int, numCps)
	for i := 0; i < numCps; i++ {
		cpOffset := plcPcdOffset + i*4
		if cpOffset < 0 || cpOffset+4 > len(clx) {
			return nil, errInvalidPcdt
		}
		cps[i] = int(binary.LittleEndian.Uint32(clx[cpOffset : cpOffset+4]))
	}
	pcdStart := plcPcdOffset + 4*numCps
	pcds := make([]pcd, numPcds)
	for i := 0; i < numPcds; i++ {
		pcdOffset := pcdStart + i*pcdSize
		if pcdOffset < 0 || pcdOffset+pcdSize > len(clx) {
			return nil, errInvalidPcdt
		}
		pcds[i] = *parsePcd(clx[pcdOffset : pcdOffset+pcdSize])
	}
	return &pcdt{lcb: lcb, PlcPcd: plcPcd{aCP: cps, aPcd: pcds}}, nil
}
// find end of RgPrc array (section 2.9.38)
// getPrcArrayEnd walks the optional run of Prc entries (clxt == 0x01) at
// the start of the Clx and returns the offset of the first byte after them
// — i.e. where the Pcdt begins.
//
// BUGFIX: the original indexed clx[prcOffset] before any bounds check, so
// an empty or truncated Clx caused an index-out-of-range panic; bounds are
// now verified before every read.
func getPrcArrayEnd(clx []byte) (int, error) {
	prcOffset := 0
	count := 0
	for {
		if prcOffset >= len(clx) {
			return 0, errInvalidPrc
		}
		if clx[prcOffset] != 0x01 { // this is not a Prc, so exit
			return prcOffset, nil
		}
		// A Prc is: clxt (1 byte), cbGrpprl (2 bytes), GrpPrl (cbGrpprl bytes).
		if prcOffset+3 > len(clx) {
			return 0, errInvalidPrc
		}
		prcDataCbGrpprl := binary.LittleEndian.Uint16(clx[prcOffset+1 : prcOffset+3])
		// Guard against corrupt zero-length entries and runaway loops.
		if prcDataCbGrpprl == 0 || count > 10000 {
			return 0, errInvalidPrc
		}
		prcOffset += 1 + 2 + int(prcDataCbGrpprl) // skip clxt, cbGrpprl, and GrpPrl
		count++
	}
}
// parse Pcd (section 2.9.177)
// Only bytes 2-6 — the FcCompressed structure — are used; the other fields
// of the 8-byte Pcd are ignored.
func parsePcd(pcdData []byte) *pcd {
	return &pcd{fc: *parseFcCompressed(pcdData[2:6])}
}
// parse FcCompressed (section 2.9.73)
// parseFcCompressed decodes the 30-bit fc offset and the fCompressed flag
// (second-highest bit of the last byte) from the 4-byte little-endian
// FcCompressed structure.
//
// BUGFIX: the original cleared the flag bit in fcData itself, mutating the
// caller's slice (a view into the shared Clx buffer); the masking is now
// done on a local copy so the input is left untouched.
func parseFcCompressed(fcData []byte) *fcCompressed {
	fCompressed := fcData[3]&64 == 64 // fCompressed is the 2nd bit from the leftmost of the last byte
	masked := []byte{fcData[0], fcData[1], fcData[2], fcData[3] & 63}
	fc := binary.LittleEndian.Uint32(masked) // word doc generally uses little endian order (1.3.7)
	return &fcCompressed{fc: int(fc), fCompressed: fCompressed}
}
// IsFileDOC checks if the data indicates a DOC file
// DOC has multiple signature according to https://filesignatures.net/index.php?search=doc&mode=EXT, D0 CF 11 E0 A1 B1 1A E1
func IsFileDOC(data []byte) bool {
	signature := []byte{0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1}
	if len(data) < len(signature) {
		return false
	}
	return bytes.Equal(data[:len(signature)], signature)
}

412
godo/office/docx.go

@ -0,0 +1,412 @@
package office
import (
"archive/zip"
"bufio"
"bytes"
"encoding/xml"
"fmt"
"io"
"io/fs"
"os"
"strings"
)
// Contains functions to work with data from a zip file
//
// ZipData abstracts the two ways a .docx archive can be backed:
// an in-memory *zip.Reader or an on-disk *zip.ReadCloser.
type ZipData interface {
	files() []*zip.File // all entries in the archive
	close() error       // release the underlying resource, if any
}

// Type for in memory zip files
type ZipInMemory struct {
	data *zip.Reader
}

func (d ZipInMemory) files() []*zip.File {
	return d.data.File
}

// Since there is nothing to close for in memory, just nil the data and return nil
func (d ZipInMemory) close() error {
	return nil
}

// Type for zip files read from disk
type ZipFile struct {
	data *zip.ReadCloser
}

func (d ZipFile) files() []*zip.File {
	return d.data.File
}

// close releases the file handle held by the on-disk reader.
func (d ZipFile) close() error {
	return d.data.Close()
}
// ReplaceDocx couples the parsed document parts with the zip source they
// came from, so the archive can be closed after editing.
type ReplaceDocx struct {
	zipReader ZipData
	content   string            // word/document.xml payload
	links     string            // word/_rels/document.xml.rels payload
	headers   map[string]string // header part payloads, keyed by entry name
	footers   map[string]string // footer part payloads, keyed by entry name
	images    map[string]string // media entry names; values set later via ReplaceImage
}

// Editable returns a Docx that shares this document's parts and can be
// modified and written out while the source archive stays open.
func (r *ReplaceDocx) Editable() *Docx {
	return &Docx{
		files:   r.zipReader.files(),
		content: r.content,
		links:   r.links,
		headers: r.headers,
		footers: r.footers,
		images:  r.images,
	}
}

// Close releases the underlying zip source (a no-op for in-memory data).
func (r *ReplaceDocx) Close() error {
	return r.zipReader.close()
}
// Docx is an editable in-memory representation of a .docx archive. The XML
// payloads are held as raw strings and the Replace* methods operate on them
// with plain string substitution.
type Docx struct {
	files   []*zip.File       // all entries of the source archive
	content string            // word/document.xml
	links   string            // word/_rels/document.xml.rels
	headers map[string]string // header XML payloads, keyed by entry name
	footers map[string]string // footer XML payloads, keyed by entry name
	images  map[string]string // media entry name -> replacement file path ("" keeps original)
}

// GetContent returns the raw XML of the main document part.
func (d *Docx) GetContent() string {
	return d.content
}

// SetContent replaces the raw XML of the main document part.
func (d *Docx) SetContent(content string) {
	d.content = content
}

// ReplaceRaw substitutes oldString with newString in the document XML
// without any escaping; num limits the number of replacements (-1 = all).
func (d *Docx) ReplaceRaw(oldString string, newString string, num int) {
	d.content = strings.Replace(d.content, oldString, newString, num)
}

// Replace XML-escapes both strings (tabs/newlines become WordprocessingML
// markup, see encode) and substitutes oldString with newString in the
// document body; num limits the number of replacements (-1 = all).
func (d *Docx) Replace(oldString string, newString string, num int) (err error) {
	oldString, err = encode(oldString)
	if err != nil {
		return err
	}
	newString, err = encode(newString)
	if err != nil {
		return err
	}
	d.content = strings.Replace(d.content, oldString, newString, num)
	return nil
}

// ReplaceLink performs the same escaped substitution in the relationships
// part (where hyperlink targets live).
func (d *Docx) ReplaceLink(oldString string, newString string, num int) (err error) {
	oldString, err = encode(oldString)
	if err != nil {
		return err
	}
	newString, err = encode(newString)
	if err != nil {
		return err
	}
	d.links = strings.Replace(d.links, oldString, newString, num)
	return nil
}

// ReplaceHeader substitutes oldString with newString in every header part.
func (d *Docx) ReplaceHeader(oldString string, newString string) (err error) {
	return replaceHeaderFooter(d.headers, oldString, newString)
}

// ReplaceFooter substitutes oldString with newString in every footer part.
func (d *Docx) ReplaceFooter(oldString string, newString string) (err error) {
	return replaceHeaderFooter(d.footers, oldString, newString)
}
// WriteToFile creates (or truncates) the file at path and writes the
// current state of the document into it as a zip archive.
func (d *Docx) WriteToFile(path string) (err error) {
	target, err := os.Create(path)
	if err != nil {
		return err
	}
	defer target.Close()
	return d.Write(target)
}
// Write serializes the document as a zip archive to ioWriter. Entries whose
// content was edited (document body, relationships, headers, footers,
// replaced images) are written from the in-memory copies; everything else
// is streamed through from the source archive unchanged.
//
// BUGFIX: the ReadCloser returned by file.Open() was never closed on the
// edited-content branches (one leaked handle per entry), write errors were
// silently discarded, and the error from w.Close() — which finalizes the
// zip central directory — was dropped.
func (d *Docx) Write(ioWriter io.Writer) (err error) {
	w := zip.NewWriter(ioWriter)
	for _, file := range d.files {
		writer, err := w.Create(file.Name)
		if err != nil {
			return err
		}
		readCloser, err := file.Open()
		if err != nil {
			return err
		}
		switch {
		case file.Name == "word/document.xml":
			_, err = writer.Write([]byte(d.content))
		case file.Name == "word/_rels/document.xml.rels":
			_, err = writer.Write([]byte(d.links))
		case strings.Contains(file.Name, "header") && d.headers[file.Name] != "":
			_, err = writer.Write([]byte(d.headers[file.Name]))
		case strings.Contains(file.Name, "footer") && d.footers[file.Name] != "":
			_, err = writer.Write([]byte(d.footers[file.Name]))
		case strings.HasPrefix(file.Name, "word/media/") && d.images[file.Name] != "":
			// Substitute the media entry with the replacement file on disk.
			var newImage *os.File
			newImage, err = os.Open(d.images[file.Name])
			if err != nil {
				readCloser.Close()
				return err
			}
			_, err = io.Copy(writer, newImage)
			newImage.Close()
		default:
			_, err = io.Copy(writer, readCloser)
		}
		readCloser.Close()
		if err != nil {
			return err
		}
	}
	// Close finalizes the archive (central directory); its error matters.
	return w.Close()
}
// replaceHeaderFooter XML-escapes both strings and substitutes every
// occurrence of oldString with newString in each part of the given
// header/footer map, in place.
func replaceHeaderFooter(headerFooter map[string]string, oldString string, newString string) (err error) {
	encodedOld, err := encode(oldString)
	if err != nil {
		return err
	}
	encodedNew, err := encode(newString)
	if err != nil {
		return err
	}
	for name, payload := range headerFooter {
		headerFooter[name] = strings.Replace(payload, encodedOld, encodedNew, -1)
	}
	return nil
}
// ReadDocxFromFS opens a docx file from the file system
// It buffers the file into memory and delegates to ReadDocxFromMemory.
//
// BUGFIX: the opened fs.File was never closed; it is now closed via defer.
func ReadDocxFromFS(file string, fsys fs.FS) (*ReplaceDocx, error) {
	f, err := fsys.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	buff := bytes.NewBuffer([]byte{})
	size, err := io.Copy(buff, f)
	if err != nil {
		return nil, err
	}
	reader := bytes.NewReader(buff.Bytes())
	return ReadDocxFromMemory(reader, size)
}
// ReadDocxFromMemory parses a docx archive held in memory (or any
// random-access source) of the given size.
func ReadDocxFromMemory(data io.ReaderAt, size int64) (*ReplaceDocx, error) {
	zipReader, err := zip.NewReader(data, size)
	if err != nil {
		return nil, err
	}
	return ReadDocx(ZipInMemory{data: zipReader})
}
// ReadDocxFile opens and parses the docx archive at path. The caller must
// Close the returned document to release the file handle.
func ReadDocxFile(path string) (*ReplaceDocx, error) {
	rc, err := zip.OpenReader(path)
	if err != nil {
		return nil, err
	}
	return ReadDocx(ZipFile{data: rc})
}
// ReadDocx parses the parts of an opened docx archive: the main document
// XML, its relationships, the header/footer parts and the list of media
// entries. Header/footer and media scanning errors are deliberately
// ignored (best-effort) — documents without them are still readable.
func ReadDocx(reader ZipData) (*ReplaceDocx, error) {
	content, err := readText(reader.files())
	if err != nil {
		return nil, err
	}
	links, err := readLinks(reader.files())
	if err != nil {
		return nil, err
	}
	headers, footers, _ := readHeaderFooter(reader.files())
	images, _ := retrieveImageFilenames(reader.files())
	return &ReplaceDocx{zipReader: reader, content: content, links: links, headers: headers, footers: footers, images: images}, nil
}
func retrieveImageFilenames(files []*zip.File) (map[string]string, error) {
images := make(map[string]string)
for _, f := range files {
if strings.HasPrefix(f.Name, "word/media/") {
images[f.Name] = ""
}
}
return images, nil
}
// readHeaderFooter locates all header and footer parts in the archive and
// reads each one's XML payload into a map keyed by entry name.
func readHeaderFooter(files []*zip.File) (headerText map[string]string, footerText map[string]string, err error) {
	emptyResult := func(e error) (map[string]string, map[string]string, error) {
		return map[string]string{}, map[string]string{}, e
	}
	headers, footers, err := retrieveHeaderFooterDoc(files)
	if err != nil {
		return emptyResult(err)
	}
	if headerText, err = buildHeaderFooter(headers); err != nil {
		return emptyResult(err)
	}
	if footerText, err = buildHeaderFooter(footers); err != nil {
		return emptyResult(err)
	}
	return headerText, footerText, nil
}
// buildHeaderFooter reads the XML payload of every given header/footer
// entry into a map keyed by entry name.
//
// BUGFIX: the ReadCloser returned by element.Open() was never closed,
// leaking one handle per header/footer part; it is now closed as soon as
// the payload has been read.
func buildHeaderFooter(headerFooter []*zip.File) (map[string]string, error) {
	headerFooterText := make(map[string]string)
	for _, element := range headerFooter {
		documentReader, err := element.Open()
		if err != nil {
			return map[string]string{}, err
		}
		text, err := wordDocToString(documentReader)
		// Close before the error check so the reader never leaks.
		documentReader.Close()
		if err != nil {
			return map[string]string{}, err
		}
		headerFooterText[element.Name] = text
	}
	return headerFooterText, nil
}
// readText locates word/document.xml in the archive and returns its raw
// XML payload as a string.
//
// BUGFIX: the ReadCloser from documentFile.Open() was never closed; it is
// now closed via defer.
func readText(files []*zip.File) (text string, err error) {
	documentFile, err := retrieveWordDoc(files)
	if err != nil {
		return "", err
	}
	documentReader, err := documentFile.Open()
	if err != nil {
		return "", err
	}
	defer documentReader.Close()
	return wordDocToString(documentReader)
}
// readLinks locates word/_rels/document.xml.rels in the archive and
// returns its raw XML payload as a string.
//
// BUGFIX: the ReadCloser from documentFile.Open() was never closed; it is
// now closed via defer.
func readLinks(files []*zip.File) (text string, err error) {
	documentFile, err := retrieveLinkDoc(files)
	if err != nil {
		return "", err
	}
	documentReader, err := documentFile.Open()
	if err != nil {
		return "", err
	}
	defer documentReader.Close()
	return wordDocToString(documentReader)
}
func wordDocToString(reader io.Reader) (string, error) {
b, err := io.ReadAll(reader)
if err != nil {
return "", err
}
return string(b), nil
}
func retrieveWordDoc(files []*zip.File) (file *zip.File, err error) {
for _, f := range files {
if f.Name == "word/document.xml" {
file = f
}
}
if file == nil {
err = fmt.Errorf("document.xml file not found")
}
return
}
func retrieveLinkDoc(files []*zip.File) (file *zip.File, err error) {
for _, f := range files {
if f.Name == "word/_rels/document.xml.rels" {
file = f
}
}
if file == nil {
err = fmt.Errorf("document.xml.rels file not found")
}
return
}
func retrieveHeaderFooterDoc(files []*zip.File) (headers []*zip.File, footers []*zip.File, err error) {
for _, f := range files {
if strings.Contains(f.Name, "header") {
headers = append(headers, f)
}
if strings.Contains(f.Name, "footer") {
footers = append(footers, f)
}
}
if len(headers) == 0 && len(footers) == 0 {
err = fmt.Errorf("headers[1-3].xml file not found and footers[1-3].xml file not found")
}
return
}
func streamToByte(stream io.Reader) []byte {
buf := new(bytes.Buffer)
buf.ReadFrom(stream)
return buf.Bytes()
}
// To get Word to recognize a tab character, we have to first close off the previous
// text element. This means if there are multiple consecutive tabs, there are empty <w:t></w:t>
// in between but it still seems to work correctly in the output document, certainly better
// than other combinations I tried.
const TAB = "</w:t><w:tab/><w:t>"
const NEWLINE = "<w:br/>"

// encode XML-escapes s and rewrites the escaped whitespace entities into
// the WordprocessingML markup Word expects (tabs and the three newline
// conventions), returning the escaped string.
//
// BUGFIX: the intermediate bufio.Writer was never flushed, so for any
// string shorter than the bufio buffer the bytes.Buffer stayed empty and
// encode silently returned "" — making every Replace call a no-op. The
// writer is now flushed before the buffer is read.
func encode(s string) (string, error) {
	var b bytes.Buffer
	w := bufio.NewWriter(&b)
	enc := xml.NewEncoder(w)
	if err := enc.Encode(s); err != nil {
		return s, err
	}
	if err := w.Flush(); err != nil {
		return s, err
	}
	output := strings.Replace(b.String(), "<string>", "", 1) // remove string tag
	output = strings.Replace(output, "</string>", "", 1)
	output = strings.Replace(output, "&#xD;&#xA;", NEWLINE, -1) // \r\n (Windows newline)
	output = strings.Replace(output, "&#xD;", NEWLINE, -1)      // \r (earlier Mac newline)
	output = strings.Replace(output, "&#xA;", NEWLINE, -1)      // \n (unix/linux/OS X newline)
	output = strings.Replace(output, "&#x9;", TAB, -1)          // \t (tab)
	return output, nil
}
// ReplaceImage registers newImage (a path on disk) as the replacement for
// the media entry oldImage; the swap takes effect when the document is
// written out. Unknown entry names yield an error.
func (d *Docx) ReplaceImage(oldImage string, newImage string) (err error) {
	if _, found := d.images[oldImage]; !found {
		return fmt.Errorf("old image: %q, file not found", oldImage)
	}
	d.images[oldImage] = newImage
	return nil
}
// ImagesLen returns the number of media entries (word/media/*) found in
// the document archive.
func (d *Docx) ImagesLen() int {
	return len(d.images)
}

31
godo/ai/convert/epub.go → godo/office/epub.go

@ -1,21 +1,4 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
package office
import (
"archive/zip"
@ -26,19 +9,13 @@ import (
"log"
"os"
"path"
"godo/ai/convert/libs"
)
const containerPath = "META-INF/container.xml"
func ConvetEpub(r io.Reader) (string, error) {
func epub2txt(filename string) (string, error) {
text := ""
fpath, tmpfile, err := libs.GetTempFile(r, "prefix-epub")
if err != nil {
return "", err
}
rc, _ := OpenReader(fpath)
rc, _ := OpenReader(filename)
book := rc.Rootfiles[0]
// Print book title.
@ -62,8 +39,6 @@ func ConvetEpub(r io.Reader) (string, error) {
if text == "" {
return "", nil
}
defer libs.CloseTempFile(tmpfile)
return title + text, nil
}

24
godo/office/etree/LICENSE

@ -0,0 +1,24 @@
Copyright 2015-2024 Brett Vickers. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

205
godo/office/etree/README.md

@ -0,0 +1,205 @@
[![GoDoc](https://godoc.org/github.com/beevik/etree?status.svg)](https://godoc.org/github.com/beevik/etree)
[![Go](https://github.com/beevik/etree/actions/workflows/go.yml/badge.svg)](https://github.com/beevik/etree/actions/workflows/go.yml)
etree
=====
The etree package is a lightweight, pure go package that expresses XML in
the form of an element tree. Its design was inspired by the Python
[ElementTree](http://docs.python.org/2/library/xml.etree.elementtree.html)
module.
Some of the package's capabilities and features:
* Represents XML documents as trees of elements for easy traversal.
* Imports, serializes, modifies or creates XML documents from scratch.
* Writes and reads XML to/from files, byte slices, strings and io interfaces.
* Performs simple or complex searches with lightweight XPath-like query APIs.
* Auto-indents XML using spaces or tabs for better readability.
* Implemented in pure go; depends only on standard go libraries.
* Built on top of the go [encoding/xml](http://golang.org/pkg/encoding/xml)
package.
### Creating an XML document
The following example creates an XML document from scratch using the etree
package and outputs its indented contents to stdout.
```go
doc := etree.NewDocument()
doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`)
doc.CreateProcInst("xml-stylesheet", `type="text/xsl" href="style.xsl"`)
people := doc.CreateElement("People")
people.CreateComment("These are all known people")
jon := people.CreateElement("Person")
jon.CreateAttr("name", "Jon")
sally := people.CreateElement("Person")
sally.CreateAttr("name", "Sally")
doc.Indent(2)
doc.WriteTo(os.Stdout)
```
Output:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="style.xsl"?>
<People>
<!--These are all known people-->
<Person name="Jon"/>
<Person name="Sally"/>
</People>
```
### Reading an XML file
Suppose you have a file on disk called `bookstore.xml` containing the
following data:
```xml
<bookstore xmlns:p="urn:schemas-books-com:prices">
<book category="COOKING">
<title lang="en">Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<p:price>30.00</p:price>
</book>
<book category="CHILDREN">
<title lang="en">Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<p:price>29.99</p:price>
</book>
<book category="WEB">
<title lang="en">XQuery Kick Start</title>
<author>James McGovern</author>
<author>Per Bothner</author>
<author>Kurt Cagle</author>
<author>James Linn</author>
<author>Vaidyanathan Nagarajan</author>
<year>2003</year>
<p:price>49.99</p:price>
</book>
<book category="WEB">
<title lang="en">Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<p:price>39.95</p:price>
</book>
</bookstore>
```
This code reads the file's contents into an etree document.
```go
doc := etree.NewDocument()
if err := doc.ReadFromFile("bookstore.xml"); err != nil {
panic(err)
}
```
You can also read XML from a string, a byte slice, or an `io.Reader`.
### Processing elements and attributes
This example illustrates several ways to access elements and attributes using
etree selection queries.
```go
root := doc.SelectElement("bookstore")
fmt.Println("ROOT element:", root.Tag)
for _, book := range root.SelectElements("book") {
fmt.Println("CHILD element:", book.Tag)
if title := book.SelectElement("title"); title != nil {
lang := title.SelectAttrValue("lang", "unknown")
fmt.Printf(" TITLE: %s (%s)\n", title.Text(), lang)
}
for _, attr := range book.Attr {
fmt.Printf(" ATTR: %s=%s\n", attr.Key, attr.Value)
}
}
```
Output:
```
ROOT element: bookstore
CHILD element: book
TITLE: Everyday Italian (en)
ATTR: category=COOKING
CHILD element: book
TITLE: Harry Potter (en)
ATTR: category=CHILDREN
CHILD element: book
TITLE: XQuery Kick Start (en)
ATTR: category=WEB
CHILD element: book
TITLE: Learning XML (en)
ATTR: category=WEB
```
### Path queries
This example uses etree's path functions to select all book titles that fall
into the category of 'WEB'. The double-slash prefix in the path causes the
search for book elements to occur recursively; book elements may appear at any
level of the XML hierarchy.
```go
for _, t := range doc.FindElements("//book[@category='WEB']/title") {
fmt.Println("Title:", t.Text())
}
```
Output:
```
Title: XQuery Kick Start
Title: Learning XML
```
This example finds the first book element under the root bookstore element and
outputs the tag and text of each of its child elements.
```go
for _, e := range doc.FindElements("./bookstore/book[1]/*") {
fmt.Printf("%s: %s\n", e.Tag, e.Text())
}
```
Output:
```
title: Everyday Italian
author: Giada De Laurentiis
year: 2005
price: 30.00
```
This example finds all books with a price of 49.99 and outputs their titles.
```go
path := etree.MustCompilePath("./bookstore/book[p:price='49.99']/title")
for _, e := range doc.FindElementsPath(path) {
fmt.Println(e.Text())
}
```
Output:
```
XQuery Kick Start
```
Note that this example uses the FindElementsPath function, which takes as an
argument a pre-compiled path object. Use precompiled paths when you plan to
search with the same path more than once.
### Other features
These are just a few examples of the things the etree package can do. See the
[documentation](http://godoc.org/github.com/beevik/etree) for a complete
description of its capabilities.
### Contributing
This project accepts contributions. Just fork the repo and submit a pull
request!

1810
godo/office/etree/etree.go

File diff suppressed because it is too large

394
godo/office/etree/helpers.go

@ -0,0 +1,394 @@
// Copyright 2015-2019 Brett Vickers.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package etree
import (
"io"
"strings"
"unicode/utf8"
)
// stack is a minimal LIFO container backed by a slice.
type stack[E any] struct {
	data []E
}

// empty reports whether the stack holds no elements.
func (s *stack[E]) empty() bool {
	return len(s.data) == 0
}

// push places value on top of the stack.
func (s *stack[E]) push(value E) {
	s.data = append(s.data, value)
}

// pop removes and returns the top element. The vacated slot is zeroed
// so the garbage collector can reclaim anything it referenced.
// Calling pop on an empty stack panics.
func (s *stack[E]) pop() E {
	top := len(s.data) - 1
	value := s.data[top]
	var zero E
	s.data[top] = zero
	s.data = s.data[:top]
	return value
}

// peek returns the top element without removing it.
// Calling peek on an empty stack panics.
func (s *stack[E]) peek() E {
	return s.data[len(s.data)-1]
}
// queue is a growable FIFO ring buffer. Elements live in data between
// head (oldest) and tail (next free slot); both indices wrap around at
// len(data).
type queue[E any] struct {
	data       []E
	head, tail int
}

// add appends value to the back of the queue. The ring grows first
// whenever fewer than two free slots remain, so one slot always stays
// unused and head == tail unambiguously means "empty".
func (f *queue[E]) add(value E) {
	if f.len()+1 >= len(f.data) {
		f.grow()
	}
	f.data[f.tail] = value
	if f.tail++; f.tail == len(f.data) {
		f.tail = 0 // wrap around
	}
}

// remove pops and returns the element at the front of the queue. The
// vacated slot is zeroed so the garbage collector can reclaim anything
// it referenced. Callers must ensure the queue is non-empty first.
func (f *queue[E]) remove() E {
	value := f.data[f.head]
	var empty E
	f.data[f.head] = empty
	if f.head++; f.head == len(f.data) {
		f.head = 0 // wrap around
	}
	return value
}

// len reports the number of queued elements, accounting for index
// wrap-around.
func (f *queue[E]) len() int {
	if f.tail >= f.head {
		return f.tail - f.head
	}
	return len(f.data) - f.head + f.tail
}

// grow doubles the ring's capacity (starting at 4) and linearizes the
// current contents to the front of the new buffer.
func (f *queue[E]) grow() {
	c := len(f.data) * 2
	if c == 0 {
		c = 4
	}
	buf, count := make([]E, c), f.len()
	if f.tail >= f.head {
		copy(buf[:count], f.data[f.head:f.tail])
	} else {
		// Wrapped: copy head..end, then start..tail.
		hindex := len(f.data) - f.head
		copy(buf[:hindex], f.data[f.head:])
		copy(buf[hindex:count], f.data[:f.tail])
	}
	f.data, f.head, f.tail = buf, 0, count
}
// xmlReader provides the interface by which an XML byte stream is
// processed and decoded.
type xmlReader interface {
	// Bytes returns the total number of bytes read so far.
	Bytes() int64
	Read(p []byte) (n int, err error)
}

// xmlSimpleReader implements a proxy reader that counts the number of
// bytes read from its encapsulated reader.
type xmlSimpleReader struct {
	r     io.Reader
	bytes int64 // running total of bytes read
}

// newXmlSimpleReader returns a counting reader wrapping r.
func newXmlSimpleReader(r io.Reader) xmlReader {
	return &xmlSimpleReader{r, 0}
}

// Bytes returns the total number of bytes read through this reader.
func (xr *xmlSimpleReader) Bytes() int64 {
	return xr.bytes
}

// Read forwards to the wrapped reader and accumulates the byte count.
func (xr *xmlSimpleReader) Read(p []byte) (n int, err error) {
	n, err = xr.r.Read(p)
	xr.bytes += int64(n)
	return n, err
}
// xmlPeekReader implements a proxy reader that counts the number of
// bytes read from its encapsulated reader. It also allows the caller to
// "peek" at the previous portions of the buffer after they have been
// parsed.
type xmlPeekReader struct {
	r          io.Reader
	bytes      int64  // total bytes read by the Read function
	buf        []byte // internal read buffer
	bufSize    int    // total bytes used in the read buffer
	bufOffset  int64  // total bytes read when buf was last filled
	window     []byte // current read buffer window
	peekBuf    []byte // buffer used to store data to be peeked at later
	peekOffset int64  // total read offset of the start of the peek buffer
}

// newXmlPeekReader returns a peek reader wrapping r, with a 4 KiB
// internal buffer and no peek capture in progress (peekOffset == -1).
func newXmlPeekReader(r io.Reader) *xmlPeekReader {
	buf := make([]byte, 4096)
	return &xmlPeekReader{
		r:          r,
		bytes:      0,
		buf:        buf,
		bufSize:    0,
		bufOffset:  0,
		window:     buf[0:0],
		peekBuf:    make([]byte, 0),
		peekOffset: -1,
	}
}

// Bytes returns the total number of bytes handed out by Read so far.
func (xr *xmlPeekReader) Bytes() int64 {
	return xr.bytes
}

// Read implements io.Reader. It serves bytes from the current buffer
// window, refilling the window from the underlying reader when it has
// been exhausted. A single call never spans more than one window.
func (xr *xmlPeekReader) Read(p []byte) (n int, err error) {
	if len(xr.window) == 0 {
		err = xr.fill()
		if err != nil {
			return 0, err
		}
		if len(xr.window) == 0 {
			return 0, nil
		}
	}
	if len(xr.window) < len(p) {
		n = len(xr.window)
	} else {
		n = len(p)
	}
	copy(p, xr.window)
	xr.window = xr.window[n:]
	xr.bytes += int64(n)
	return n, err
}

// PeekPrepare begins capturing up to maxLen bytes of input starting at
// absolute read offset. Any part of that range already present in the
// current buffer is captured immediately; the rest is collected as
// later fills pass over it.
func (xr *xmlPeekReader) PeekPrepare(offset int64, maxLen int) {
	if maxLen > cap(xr.peekBuf) {
		xr.peekBuf = make([]byte, 0, maxLen)
	}
	xr.peekBuf = xr.peekBuf[0:0]
	xr.peekOffset = offset
	xr.updatePeekBuf()
}

// PeekFinalize completes the capture started by PeekPrepare and
// returns the collected bytes. The returned slice aliases the internal
// peek buffer and is only valid until the next PeekPrepare.
func (xr *xmlPeekReader) PeekFinalize() []byte {
	xr.updatePeekBuf()
	return xr.peekBuf
}
// fill refills the internal read buffer from the underlying reader,
// resets the buffer window, and offers any newly buffered bytes to the
// peek buffer.
//
// Per the io.Reader contract, a read may return n > 0 together with a
// non-nil error (e.g. the final chunk before io.EOF). The previous
// implementation discarded those bytes, truncating the tail of the
// stream. Here they are consumed first; the error surfaces on the next
// call, when the reader returns (0, err) again.
func (xr *xmlPeekReader) fill() error {
	xr.bufOffset = xr.bytes
	xr.bufSize = 0
	n, err := xr.r.Read(xr.buf)
	if n > 0 {
		xr.window, xr.bufSize = xr.buf[:n], n
		xr.updatePeekBuf()
		return nil
	}
	xr.window, xr.bufSize = xr.buf[0:0], 0
	return err
}
// updatePeekBuf copies into peekBuf any bytes of the requested peek
// range [peekOffset, peekOffset+cap(peekBuf)) that overlap the data
// currently held in buf. It is a no-op when no peek is in progress or
// the peek buffer is already full.
func (xr *xmlPeekReader) updatePeekBuf() {
	peekRemain := cap(xr.peekBuf) - len(xr.peekBuf)
	if xr.peekOffset >= 0 && peekRemain > 0 {
		// Intersect the desired absolute byte range with the absolute
		// range of bytes currently buffered.
		rangeMin := xr.peekOffset
		rangeMax := xr.peekOffset + int64(cap(xr.peekBuf))
		bufMin := xr.bufOffset
		bufMax := xr.bufOffset + int64(xr.bufSize)
		if rangeMin < bufMin {
			rangeMin = bufMin
		}
		if rangeMax > bufMax {
			rangeMax = bufMax
		}
		if rangeMax > rangeMin {
			// Convert to buf-relative indices and clamp to the space
			// remaining in peekBuf.
			rangeMin -= xr.bufOffset
			rangeMax -= xr.bufOffset
			if int(rangeMax-rangeMin) > peekRemain {
				rangeMax = rangeMin + int64(peekRemain)
			}
			xr.peekBuf = append(xr.peekBuf, xr.buf[rangeMin:rangeMax]...)
		}
	}
}
// xmlWriter implements a proxy writer that counts the number of
// bytes written by its encapsulated writer.
type xmlWriter struct {
	w     io.Writer
	bytes int64 // running total of bytes successfully written
}

// newXmlWriter returns a counting writer wrapping w.
func newXmlWriter(w io.Writer) *xmlWriter {
	return &xmlWriter{w: w}
}

// Write forwards p to the wrapped writer and adds the number of bytes
// actually written to the running total.
func (xw *xmlWriter) Write(p []byte) (n int, err error) {
	n, err = xw.w.Write(p)
	xw.bytes += int64(n)
	return n, err
}
// isWhitespace reports whether s consists entirely of XML whitespace
// characters (space, tab, newline, carriage return). The empty string
// counts as whitespace.
func isWhitespace(s string) bool {
	for _, c := range s {
		switch c {
		case ' ', '\t', '\n', '\r':
			// keep scanning
		default:
			return false
		}
	}
	return true
}
// spaceMatch reports whether namespace a matches namespace b. An empty
// a acts as a wildcard and matches any b.
func spaceMatch(a, b string) bool {
	return a == "" || a == b
}
// spaceDecompose splits a "namespace:tag" identifier at the first ':'
// and returns the two parts. When no colon is present, the whole input
// is returned as the key with an empty namespace.
func spaceDecompose(str string) (space, key string) {
	if colon := strings.IndexByte(str, ':'); colon >= 0 {
		return str[:colon], str[colon+1:]
	}
	return "", str
}
// Strings used by indentCRLF and indentLF
const (
indentSpaces = "\r\n "
indentTabs = "\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"
)
// indentCRLF returns a CRLF newline followed by n copies of the first
// non-CRLF character in source. The source string acts as a cache of
// pre-repeated indent characters; when n exceeds what the cache holds,
// the remainder is generated with strings.Repeat.
func indentCRLF(n int, source string) string {
	if n < 0 {
		return source[:2] // just "\r\n"
	}
	if n < len(source)-1 {
		return source[:n+2] // fully served from the cache
	}
	return source + strings.Repeat(source[2:3], n-len(source)+2)
}
// indentLF returns a LF newline followed by n copies of the first
// non-LF character in source. It reuses the same CRLF-prefixed cache
// strings as indentCRLF, skipping the leading '\r'.
func indentLF(n int, source string) string {
	if n < 0 {
		return source[1:2] // just "\n"
	}
	if n < len(source)-1 {
		return source[1 : n+2] // fully served from the cache
	}
	return source[1:] + strings.Repeat(source[2:3], n-len(source)+2)
}
// nextIndex returns the index of the next occurrence of byte ch in s
// at or after offset, or -1 if ch does not occur there.
func nextIndex(s string, ch byte, offset int) int {
	if i := strings.IndexByte(s[offset:], ch); i >= 0 {
		return offset + i
	}
	return -1
}
// isInteger reports whether s looks like a base-10 integer, optionally
// prefixed by a minus sign. Note: the empty string and a lone "-" both
// return true, matching the historical behavior callers rely on.
func isInteger(s string) bool {
	for i := 0; i < len(s); i++ {
		if s[i] >= '0' && s[i] <= '9' {
			continue
		}
		if i == 0 && s[i] == '-' {
			continue
		}
		return false
	}
	return true
}
// escapeMode selects which characters escapeString must escape. The
// canonical modes implement the canonical-XML serialization rules for
// text nodes and attribute values respectively.
type escapeMode byte

const (
	escapeNormal        escapeMode = iota // standard XML escaping
	escapeCanonicalText                   // canonical-XML text node rules
	escapeCanonicalAttr                   // canonical-XML attribute rules
)
// escapeString writes an escaped version of a string to the writer.
// The escape mode selects between standard XML escaping and the two
// canonical-XML variants: canonical text leaves quotes and apostrophes
// literal, while canonical attributes additionally escape tab, newline
// and carriage return as numeric character references.
func escapeString(w Writer, s string, m escapeMode) {
	var esc []byte
	last := 0 // start of the pending run of unescaped characters
	for i := 0; i < len(s); {
		r, width := utf8.DecodeRuneInString(s[i:])
		i += width
		switch r {
		case '&':
			esc = []byte("&amp;")
		case '<':
			esc = []byte("&lt;")
		case '>':
			if m == escapeCanonicalAttr {
				continue // '>' stays literal in canonical attributes
			}
			esc = []byte("&gt;")
		case '\'':
			if m != escapeNormal {
				continue // canonical modes keep apostrophes literal
			}
			esc = []byte("&apos;")
		case '"':
			if m == escapeCanonicalText {
				continue // canonical text keeps quotes literal
			}
			esc = []byte("&quot;")
		case '\t':
			if m != escapeCanonicalAttr {
				continue
			}
			esc = []byte("&#x9;")
		case '\n':
			if m != escapeCanonicalAttr {
				continue
			}
			esc = []byte("&#xA;")
		case '\r':
			if m == escapeNormal {
				continue
			}
			esc = []byte("&#xD;")
		default:
			// Characters outside the XML Char production, and invalid
			// UTF-8 bytes (which decode as U+FFFD with width 1), are
			// replaced with the Unicode replacement character.
			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
				esc = []byte("\uFFFD")
				break
			}
			continue
		}
		// Flush the unescaped run preceding this rune, then the escape.
		w.WriteString(s[last : i-width])
		w.Write(esc)
		last = i
	}
	w.WriteString(s[last:])
}
// isInCharacterRange reports whether r is a character permitted by the
// XML "Char" production: tab, LF, CR, and the non-surrogate,
// non-control Unicode ranges.
func isInCharacterRange(r rune) bool {
	switch {
	case r == 0x09, r == 0x0A, r == 0x0D:
		return true
	case r >= 0x20 && r <= 0xD7FF:
		return true
	case r >= 0xE000 && r <= 0xFFFD:
		return true
	case r >= 0x10000 && r <= 0x10FFFF:
		return true
	default:
		return false
	}
}

595
godo/office/etree/path.go

@ -0,0 +1,595 @@
// Copyright 2015-2019 Brett Vickers.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package etree
import (
"strconv"
"strings"
)
/*
A Path is a string that represents a search path through an etree starting
from the document root or an arbitrary element. Paths are used with the
Element object's Find* methods to locate and return desired elements.
A Path consists of a series of slash-separated "selectors", each of which may
be modified by one or more bracket-enclosed "filters". Selectors are used to
traverse the etree from element to element, while filters are used to narrow
the list of candidate elements at each node.
Although etree Path strings are structurally and behaviorally similar to XPath
strings (https://www.w3.org/TR/1999/REC-xpath-19991116/), they have a more
limited set of selectors and filtering options.
The following selectors are supported by etree paths:
. Select the current element.
.. Select the parent of the current element.
* Select all child elements of the current element.
/ Select the root element when used at the start of a path.
// Select all descendants of the current element.
tag Select all child elements with a name matching the tag.
The following basic filters are supported:
[@attrib] Keep elements with an attribute named attrib.
[@attrib='val'] Keep elements with an attribute named attrib and value matching val.
[tag] Keep elements with a child element named tag.
[tag='val'] Keep elements with a child element named tag and text matching val.
[n] Keep the n-th element, where n is a numeric index starting from 1.
The following function-based filters are supported:
[text()] Keep elements with non-empty text.
[text()='val'] Keep elements whose text matches val.
[local-name()='val'] Keep elements whose un-prefixed tag matches val.
[name()='val'] Keep elements whose full tag exactly matches val.
[namespace-prefix()] Keep elements with non-empty namespace prefixes.
[namespace-prefix()='val'] Keep elements whose namespace prefix matches val.
[namespace-uri()] Keep elements with non-empty namespace URIs.
[namespace-uri()='val'] Keep elements whose namespace URI matches val.
Below are some examples of etree path strings.
Select the bookstore child element of the root element:
/bookstore
Beginning from the root element, select the title elements of all descendant
book elements having a 'category' attribute of 'WEB':
//book[@category='WEB']/title
Beginning from the current element, select the first descendant book element
with a title child element containing the text 'Great Expectations':
.//book[title='Great Expectations'][1]
Beginning from the current element, select all child elements of book elements
with an attribute 'language' set to 'english':
./book/*[@language='english']
Beginning from the current element, select all child elements of book elements
containing the text 'special':
./book/*[text()='special']
Beginning from the current element, select all descendant book elements whose
title child element has a 'language' attribute of 'french':
.//book/title[@language='french']/..
Beginning from the current element, select all descendant book elements
belonging to the http://www.w3.org/TR/html4/ namespace:
.//book[namespace-uri()='http://www.w3.org/TR/html4/']
*/
type Path struct {
segments []segment
}
// ErrPath is returned by path functions when an invalid etree path is provided.
type ErrPath string
// Error returns the string describing a path error.
func (err ErrPath) Error() string {
return "etree: " + string(err)
}
// CompilePath creates an optimized version of an XPath-like string that
// can be used to query elements in an element tree.
func CompilePath(path string) (Path, error) {
var comp compiler
segments := comp.parsePath(path)
if comp.err != ErrPath("") {
return Path{nil}, comp.err
}
return Path{segments}, nil
}
// MustCompilePath creates an optimized version of an XPath-like string that
// can be used to query elements in an element tree. Panics if an error
// occurs. Use this function to create Paths when you know the path is
// valid (i.e., if it's hard-coded).
func MustCompilePath(path string) Path {
p, err := CompilePath(path)
if err != nil {
panic(err)
}
return p
}
// A segment is a portion of a path between "/" characters.
// It contains one selector and zero or more [filters].
type segment struct {
	sel     selector
	filters []filter
}

// apply runs the segment's selector against e to populate the pather's
// candidate list, then applies each filter in order to narrow it.
func (seg *segment) apply(e *Element, p *pather) {
	seg.sel.apply(e, p)
	for _, f := range seg.filters {
		f.apply(p)
	}
}
// A selector selects XML elements for consideration by the
// path traversal.
type selector interface {
apply(e *Element, p *pather)
}
// A filter pares down a list of candidate XML elements based
// on a path filter in [brackets].
type filter interface {
apply(p *pather)
}
// A pather is a helper object that traverses an element tree using
// a Path object. It collects and deduplicates all elements matching
// the path query.
type pather struct {
	queue      queue[node]
	results    []*Element
	inResults  map[*Element]bool // set form of results, for O(1) dedup
	candidates []*Element
	scratch    []*Element // used by filters
}

// A node represents an element and the remaining path segments that
// should be applied against it by the pather.
type node struct {
	e        *Element
	segments []segment
}

// newPather returns a pather with empty (non-nil) result and work
// slices, ready for a traverse call.
func newPather() *pather {
	return &pather{
		results:    make([]*Element, 0),
		inResults:  make(map[*Element]bool),
		candidates: make([]*Element, 0),
		scratch:    make([]*Element, 0),
	}
}
// traverse follows the path from the element e, collecting
// and then returning all elements that match the path's selectors
// and filters. Traversal proceeds breadth-first via the pather's
// work queue.
func (p *pather) traverse(e *Element, path Path) []*Element {
	for p.queue.add(node{e, path.segments}); p.queue.len() > 0; {
		p.eval(p.queue.remove())
	}
	return p.results
}

// eval evaluates the current path node by applying the remaining
// path's selector rules against the node's element. If this was the
// final segment, surviving candidates are appended (deduplicated) to
// the result set; otherwise each candidate is re-queued with the
// segments that remain.
func (p *pather) eval(n node) {
	p.candidates = p.candidates[0:0]
	seg, remain := n.segments[0], n.segments[1:]
	seg.apply(n.e, p)
	if len(remain) == 0 {
		for _, c := range p.candidates {
			if in := p.inResults[c]; !in {
				p.inResults[c] = true
				p.results = append(p.results, c)
			}
		}
	} else {
		for _, c := range p.candidates {
			p.queue.add(node{c, remain})
		}
	}
}
// A compiler generates a compiled path from a path string.
type compiler struct {
err ErrPath
}
// parsePath parses an XPath-like string describing a path
// through an element tree and returns a slice of segment
// descriptors.
func (c *compiler) parsePath(path string) []segment {
// If path ends with //, fix it
if strings.HasSuffix(path, "//") {
path += "*"
}
var segments []segment
// Check for an absolute path
if strings.HasPrefix(path, "/") {
segments = append(segments, segment{new(selectRoot), []filter{}})
path = path[1:]
}
// Split path into segments
for _, s := range splitPath(path) {
segments = append(segments, c.parseSegment(s))
if c.err != ErrPath("") {
break
}
}
return segments
}
// splitPath splits path on '/' separators, ignoring any '/' that
// appears inside a single- or double-quoted section (as used by filter
// expressions such as [tag='a/b']). The returned slice always has at
// least one element.
func splitPath(path string) []string {
	var pieces []string
	start := 0
	var quote byte // 0 when not inside a quoted section
	for i := 0; i < len(path); i++ {
		switch c := path[i]; {
		case quote != 0:
			if c == quote {
				quote = 0 // closing quote found
			}
		case c == '\'' || c == '"':
			quote = c // opening quote
		case c == '/':
			pieces = append(pieces, path[start:i])
			start = i + 1
		}
	}
	return append(pieces, path[start:])
}
// parseSegment parses a path segment between / characters.
func (c *compiler) parseSegment(path string) segment {
pieces := strings.Split(path, "[")
seg := segment{
sel: c.parseSelector(pieces[0]),
filters: []filter{},
}
for i := 1; i < len(pieces); i++ {
fpath := pieces[i]
if len(fpath) == 0 || fpath[len(fpath)-1] != ']' {
c.err = ErrPath("path has invalid filter [brackets].")
break
}
seg.filters = append(seg.filters, c.parseFilter(fpath[:len(fpath)-1]))
}
return seg
}
// parseSelector parses a selector at the start of a path segment.
func (c *compiler) parseSelector(path string) selector {
switch path {
case ".":
return new(selectSelf)
case "..":
return new(selectParent)
case "*":
return new(selectChildren)
case "":
return new(selectDescendants)
default:
return newSelectChildrenByTag(path)
}
}
var fnTable = map[string]func(e *Element) string{
"local-name": (*Element).name,
"name": (*Element).FullTag,
"namespace-prefix": (*Element).namespacePrefix,
"namespace-uri": (*Element).NamespaceURI,
"text": (*Element).Text,
}
// parseFilter parses a path filter contained within [brackets] and
// returns the matching filter implementation. On a syntax error or an
// unknown function name it records the error in c.err and returns nil.
func (c *compiler) parseFilter(path string) filter {
	if len(path) == 0 {
		c.err = ErrPath("path contains an empty filter expression.")
		return nil
	}
	// Filter contains [@attr='val'], [@attr="val"], [fn()='val'],
	// [fn()="val"], [tag='val'] or [tag="val"]?
	eqindex := strings.IndexByte(path, '=')
	if eqindex >= 0 && eqindex+1 < len(path) {
		quote := path[eqindex+1]
		if quote == '\'' || quote == '"' {
			// The closing quote must be the last character of the
			// filter expression.
			rindex := nextIndex(path, quote, eqindex+2)
			if rindex != len(path)-1 {
				c.err = ErrPath("path has mismatched filter quotes.")
				return nil
			}
			key := path[:eqindex]
			value := path[eqindex+2 : rindex]
			switch {
			case key[0] == '@':
				return newFilterAttrVal(key[1:], value)
			case strings.HasSuffix(key, "()"):
				// Function-valued comparison, e.g. [text()='val'].
				name := key[:len(key)-2]
				if fn, ok := fnTable[name]; ok {
					return newFilterFuncVal(fn, value)
				}
				c.err = ErrPath("path has unknown function " + name)
				return nil
			default:
				return newFilterChildText(key, value)
			}
		}
	}
	// Filter contains [@attr], [N], [tag] or [fn()]
	switch {
	case path[0] == '@':
		return newFilterAttr(path[1:])
	case strings.HasSuffix(path, "()"):
		name := path[:len(path)-2]
		if fn, ok := fnTable[name]; ok {
			return newFilterFunc(fn)
		}
		c.err = ErrPath("path has unknown function " + name)
		return nil
	case isInteger(path):
		// Path positions are 1-based; convert positive values to a
		// 0-based index. Non-positive values pass through unchanged and
		// are treated as end-relative by filterPos.
		pos, _ := strconv.Atoi(path)
		switch {
		case pos > 0:
			return newFilterPos(pos - 1)
		default:
			return newFilterPos(pos)
		}
	default:
		return newFilterChild(path)
	}
}
// selectSelf selects the current element into the candidate list.
type selectSelf struct{}
func (s *selectSelf) apply(e *Element, p *pather) {
p.candidates = append(p.candidates, e)
}
// selectRoot selects the element's root node.
type selectRoot struct{}
func (s *selectRoot) apply(e *Element, p *pather) {
root := e
for root.parent != nil {
root = root.parent
}
p.candidates = append(p.candidates, root)
}
// selectParent selects the element's parent into the candidate list.
type selectParent struct{}
func (s *selectParent) apply(e *Element, p *pather) {
if e.parent != nil {
p.candidates = append(p.candidates, e.parent)
}
}
// selectChildren selects the element's child elements into the
// candidate list.
type selectChildren struct{}
func (s *selectChildren) apply(e *Element, p *pather) {
for _, c := range e.Child {
if c, ok := c.(*Element); ok {
p.candidates = append(p.candidates, c)
}
}
}
// selectDescendants selects all descendant child elements
// of the element into the candidate list.
type selectDescendants struct{}
func (s *selectDescendants) apply(e *Element, p *pather) {
var queue queue[*Element]
for queue.add(e); queue.len() > 0; {
e := queue.remove()
p.candidates = append(p.candidates, e)
for _, c := range e.Child {
if c, ok := c.(*Element); ok {
queue.add(c)
}
}
}
}
// selectChildrenByTag selects into the candidate list all child
// elements of the element having the specified tag.
type selectChildrenByTag struct {
space, tag string
}
func newSelectChildrenByTag(path string) *selectChildrenByTag {
s, l := spaceDecompose(path)
return &selectChildrenByTag{s, l}
}
func (s *selectChildrenByTag) apply(e *Element, p *pather) {
for _, c := range e.Child {
if c, ok := c.(*Element); ok && spaceMatch(s.space, c.Space) && s.tag == c.Tag {
p.candidates = append(p.candidates, c)
}
}
}
// filterPos filters the candidate list, keeping only the
// candidate at the specified index.
type filterPos struct {
	index int
}

// newFilterPos returns a position filter. pos is a 0-based index when
// non-negative, or an end-relative offset when negative (-1 selects
// the last candidate).
func newFilterPos(pos int) *filterPos {
	return &filterPos{pos}
}

// apply keeps at most one candidate: candidates[index] for a
// non-negative index, or candidates[len+index] for a negative one.
// An out-of-range index leaves the candidate list empty.
func (f *filterPos) apply(p *pather) {
	if f.index >= 0 {
		if f.index < len(p.candidates) {
			p.scratch = append(p.scratch, p.candidates[f.index])
		}
	} else {
		if -f.index <= len(p.candidates) {
			p.scratch = append(p.scratch, p.candidates[len(p.candidates)+f.index])
		}
	}
	// Swap scratch into candidates, reusing the old backing array.
	p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterAttr filters the candidate list for elements having
// the specified attribute.
type filterAttr struct {
space, key string
}
func newFilterAttr(str string) *filterAttr {
s, l := spaceDecompose(str)
return &filterAttr{s, l}
}
func (f *filterAttr) apply(p *pather) {
for _, c := range p.candidates {
for _, a := range c.Attr {
if spaceMatch(f.space, a.Space) && f.key == a.Key {
p.scratch = append(p.scratch, c)
break
}
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterAttrVal filters the candidate list for elements having
// the specified attribute with the specified value.
type filterAttrVal struct {
space, key, val string
}
func newFilterAttrVal(str, value string) *filterAttrVal {
s, l := spaceDecompose(str)
return &filterAttrVal{s, l, value}
}
func (f *filterAttrVal) apply(p *pather) {
for _, c := range p.candidates {
for _, a := range c.Attr {
if spaceMatch(f.space, a.Space) && f.key == a.Key && f.val == a.Value {
p.scratch = append(p.scratch, c)
break
}
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterFunc filters the candidate list for elements satisfying a custom
// boolean function.
type filterFunc struct {
fn func(e *Element) string
}
func newFilterFunc(fn func(e *Element) string) *filterFunc {
return &filterFunc{fn}
}
func (f *filterFunc) apply(p *pather) {
for _, c := range p.candidates {
if f.fn(c) != "" {
p.scratch = append(p.scratch, c)
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterFuncVal filters the candidate list for elements containing a value
// matching the result of a custom function.
type filterFuncVal struct {
fn func(e *Element) string
val string
}
func newFilterFuncVal(fn func(e *Element) string, value string) *filterFuncVal {
return &filterFuncVal{fn, value}
}
func (f *filterFuncVal) apply(p *pather) {
for _, c := range p.candidates {
if f.fn(c) == f.val {
p.scratch = append(p.scratch, c)
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterChild filters the candidate list for elements having
// a child element with the specified tag.
type filterChild struct {
space, tag string
}
func newFilterChild(str string) *filterChild {
s, l := spaceDecompose(str)
return &filterChild{s, l}
}
func (f *filterChild) apply(p *pather) {
for _, c := range p.candidates {
for _, cc := range c.Child {
if cc, ok := cc.(*Element); ok &&
spaceMatch(f.space, cc.Space) &&
f.tag == cc.Tag {
p.scratch = append(p.scratch, c)
}
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterChildText filters the candidate list for elements having
// a child element with the specified tag and text.
type filterChildText struct {
space, tag, text string
}
func newFilterChildText(str, text string) *filterChildText {
s, l := spaceDecompose(str)
return &filterChildText{s, l, text}
}
func (f *filterChildText) apply(p *pather) {
for _, c := range p.candidates {
for _, cc := range c.Child {
if cc, ok := cc.(*Element); ok &&
spaceMatch(f.space, cc.Space) &&
f.tag == cc.Tag &&
f.text == cc.Text() {
p.scratch = append(p.scratch, c)
}
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}

10
godo/ai/convert/tidy.go → godo/office/etree/tidy.go

@ -15,20 +15,18 @@
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
package etree
import (
"bytes"
"fmt"
"io"
"github.com/beevik/etree"
)
// TidyWithEtree 使用beevik/etree库进行简单的XML清理
func Tidy(r io.Reader) ([]byte, error) {
// 读取并解析XML
doc := etree.NewDocument()
doc := NewDocument()
if _, err := doc.ReadFrom(r); err != nil {
return nil, fmt.Errorf("error reading and parsing XML: %w", err)
}
@ -46,10 +44,10 @@ func Tidy(r io.Reader) ([]byte, error) {
}
// removeEmptyNodes 遍历XML树并移除空节点
func removeEmptyNodes(node *etree.Element) {
func removeEmptyNodes(node *Element) {
for i := len(node.Child) - 1; i >= 0; i-- { // 逆序遍历以安全删除
token := node.Child[i]
element, ok := token.(*etree.Element) // 检查是否为etree.Element类型
element, ok := token.(*Element) // 检查是否为Element类型
if ok {
text := element.Text() // 获取元素的文本
if text == "" && len(element.Attr) == 0 && len(element.Child) == 0 {

30
godo/ai/convert/html.go → godo/office/html.go

@ -1,31 +1,25 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
package office
import (
"bytes"
"io"
"os"
"regexp"
"strings"
"golang.org/x/net/html"
)
// html2txt opens the named HTML file and delegates extraction of its
// text content to ConvertHTML.
func html2txt(filename string) (string, error) {
	file, err := os.Open(filename)
	if err != nil {
		return "", err
	}
	defer file.Close()
	return ConvertHTML(file)
}
// 去除字符串中的HTML标签
func TrimHtml(text string) string {
// 去除字符串中的HTML标签

64
godo/office/json.go

@ -0,0 +1,64 @@
package office
import (
"encoding/json"
"io/ioutil"
"os"
"regexp"
"strings"
)
// extractTextFromJSON 递归地从 JSON 数据中提取纯文本
func extractTextFromJSON(data interface{}) []string {
var texts []string
switch v := data.(type) {
case map[string]interface{}:
for _, value := range v {
texts = append(texts, extractTextFromJSON(value)...)
}
case []interface{}:
for _, item := range v {
texts = append(texts, extractTextFromJSON(item)...)
}
case string:
texts = append(texts, v)
default:
// 其他类型忽略
}
return texts
}
func json2txt(filename string) (string, error) {
file, err := os.Open(filename)
if err != nil {
return "", err
}
defer file.Close()
byteValue, err := ioutil.ReadAll(file)
if err != nil {
return "", err
}
var jsonData interface{}
err = json.Unmarshal(byteValue, &jsonData)
if err != nil {
return "", err
}
plainText := extractTextFromJSON(jsonData)
// 将切片中的所有字符串连接成一个字符串
plainTextStr := strings.Join(plainText, " ")
// 移除多余的空格
re := regexp.MustCompile(`\s+`)
plainTextStr = re.ReplaceAllString(plainTextStr, " ")
// 移除开头和结尾的空格
plainTextStr = strings.TrimSpace(plainTextStr)
return plainTextStr, nil
}

27
godo/office/linux.go

@ -0,0 +1,27 @@
//go:build linux
// +build linux
package office
import (
"os"
"syscall"
"time"
)
// getFileInfoData fills in the filesystem-derived fields of data
// (file name, default title, size and the change/modify/access
// timestamps) by calling os.Stat on data.path. Linux-only: it relies
// on the Ctim/Mtim/Atim fields of syscall.Stat_t.
func getFileInfoData(data *Document) (bool, error) {
	fileinfo, err := os.Stat(data.path)
	if err != nil {
		return false, err
	}
	data.Filename = fileinfo.Name()
	// Use the file name as a fallback title; format-specific metadata
	// may replace it later.
	data.Title = data.Filename
	data.Size = int(fileinfo.Size())
	stat := fileinfo.Sys().(*syscall.Stat_t)
	// NOTE(review): Ctim is the inode status-change time, not creation
	// time — Linux does not expose a true creation time via Stat_t.
	data.Createtime = time.Unix(stat.Ctim.Sec, stat.Ctim.Nsec)
	data.Modifytime = time.Unix(stat.Mtim.Sec, stat.Mtim.Nsec)
	data.Accesstime = time.Unix(stat.Atim.Sec, stat.Atim.Nsec)
	return true, nil
}

48
godo/office/md.go

@ -0,0 +1,48 @@
package office
import (
"bufio"
"os"
"regexp"
"strings"
)
var (
reHTML = regexp.MustCompile(`<[^>]*>`)
reMarkdown = regexp.MustCompile(`[\*_|#]{1,4}`)
reWhitespace = regexp.MustCompile(`\s+`)
)
func md2txt(filename string) (string, error) {
file, err := os.Open(filename)
if err != nil {
return "", err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
// 移除 HTML 标签
line = reHTML.ReplaceAllString(line, "")
// 移除 Markdown 格式符号
line = reMarkdown.ReplaceAllString(line, "")
lines = append(lines, line)
}
if err := scanner.Err(); err != nil {
return "", err
}
// 合并所有行
content := strings.Join(lines, " ")
// 移除多余的空格
content = reWhitespace.ReplaceAllString(content, " ")
// 移除开头和结尾的空格
content = strings.TrimSpace(content)
return content, nil
}

36
godo/ai/convert/odt.go → godo/office/odt.go

@ -1,36 +1,24 @@
/*
* GodoOS - A lightweight cloud desktop
* Copyright (C) 2024 https://godoos.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package convert
package office
import (
"archive/zip"
"bytes"
"fmt"
"io"
"os"
"time"
)
// ConvertODT converts a ODT file to text
func ConvertODT(r io.Reader) (string, error) {
func odt2txt(filePath string) (string, error) {
meta := make(map[string]string)
var textBody string
b, err := io.ReadAll(io.LimitReader(r, maxBytes))
file, err := os.Open(filePath)
if err != nil {
return "", fmt.Errorf("error opening file: %v", err)
}
defer file.Close()
b, err := io.ReadAll(io.LimitReader(file, maxBytes))
if err != nil {
return "", err
}
@ -80,12 +68,6 @@ func ConvertODT(r io.Reader) (string, error) {
}
}
}
// 在成功解析ZIP文件后,添加图片提取逻辑
images, err := findImagesInZip(zr)
if err != nil {
fmt.Printf("Error extracting images: %v", err)
}
fmt.Printf("Images: %v", images)
return textBody, nil
}

321
godo/office/office.go

@ -0,0 +1,321 @@
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package office
import (
"archive/zip"
"bufio"
"bytes"
"encoding/xml"
"errors"
"fmt"
pdf "godo/office/pdf"
xlsx "godo/office/xlsx"
"html"
"os"
"path"
"path/filepath"
"regexp"
"strings"
)
// GetDocument opens the file at pathname, dispatches on its extension
// to a format-specific text extractor, and returns a Document
// populated with filesystem info, (for OOXML formats) embedded
// metadata, and the extracted text. Unrecognized extensions yield a
// Document carrying file info only.
func GetDocument(pathname string) (*Document, error) {
	abPath, err := filepath.Abs(pathname)
	if err != nil {
		return nil, err
	}
	filename := path.Base(pathname)
	data := Document{path: pathname, RePath: abPath, Title: filename}
	extension := path.Ext(pathname)
	// Fill in size/timestamps first; a stat failure aborts before any
	// content extraction is attempted.
	_, err = getFileInfoData(&data)
	if err != nil {
		return &data, err
	}
	switch extension {
	case ".docx":
		// OOXML formats also carry docProps metadata; a metadata
		// failure is only logged, never fatal.
		_, e := getMetaData(&data)
		if e != nil {
			fmt.Printf("⚠️ %s", e.Error())
		}
		_, err = getContentData(&data, docx2txt)
	case ".pptx":
		_, e := getMetaData(&data)
		if e != nil {
			fmt.Printf("⚠️ %s", e.Error())
		}
		_, err = getContentData(&data, pptx2txt)
	case ".xlsx":
		_, e := getMetaData(&data)
		if e != nil {
			fmt.Printf("⚠️ %s", e.Error())
		}
		_, err = getContentData(&data, xlsx2txt)
	case ".pdf":
		_, err = getContentData(&data, pdf2txt)
	case ".doc":
		_, err = getContentData(&data, doc2txt)
	case ".ppt":
		_, err = getContentData(&data, ppt2txt)
	case ".xls":
		_, err = getContentData(&data, xls2txt)
	case ".epub":
		_, err = getContentData(&data, epub2txt)
	case ".odt":
		_, err = getContentData(&data, odt2txt)
	case ".xml":
		_, err = getContentData(&data, xml2txt)
	case ".rtf":
		_, err = getContentData(&data, rtf2txt)
	case ".md":
		_, err = getContentData(&data, md2txt)
	case ".txt":
		_, err = getContentData(&data, text2txt)
	case ".xhtml", ".html", ".htm":
		_, err = getContentData(&data, html2txt)
	case ".json":
		_, err = getContentData(&data, json2txt)
	}
	if err != nil {
		return &data, err
	}
	return &data, nil
}
// getMetaData reads the docProps/core.xml metadata of OOXML files
// (only *.docx, *.xlsx, *.pptx) and copies the fields into data. The
// document title is replaced only when the metadata supplies one.
func getMetaData(data *Document) (bool, error) {
	file, err := os.Open(data.path)
	if err != nil {
		return false, err
	}
	defer file.Close()
	meta, err := GetContent(file)
	if err != nil {
		return false, errors.New("failed to get office meta data")
	}
	if meta.Title != "" {
		data.Title = meta.Title
	}
	data.Subject = meta.Subject
	data.Creator = meta.Creator
	data.Keywords = meta.Keywords
	data.Description = meta.Description
	data.Lastmodifiedby = meta.LastModifiedBy
	data.Revision = meta.Revision
	data.Category = meta.Category
	// NOTE(review): Content is assigned meta.Category, which looks like
	// a copy-paste slip (Category was already stored just above). For
	// the OOXML formats Content is overwritten by getContentData right
	// after this call, so the value only shows through when content
	// extraction fails — confirm the intended field.
	data.Content = meta.Category
	return true, nil
}
// GetContent opens an OOXML document (a zip container) via its file
// name and decodes the docProps/core.xml metadata part into an
// XMLContent value. When the part is missing, the empty-input unmarshal
// fails and the usual "failed to Unmarshal" error is returned.
func GetContent(document *os.File) (fields XMLContent, err error) {
	// Attempt to read the document file directly as a zip file.
	z, err := zip.OpenReader(document.Name())
	if err != nil {
		return fields, errors.New("failed to open the file as zip")
	}
	defer z.Close()
	var xmlFile strings.Builder
	for _, file := range z.File {
		if file.Name != "docProps/core.xml" {
			continue
		}
		rc, err := file.Open()
		if err != nil {
			return fields, errors.New("failed to open docProps/core.xml")
		}
		defer rc.Close()
		scanner := bufio.NewScanner(rc)
		// core.xml is frequently emitted as a single very long line;
		// raise the scanner's token limit above the 64 KiB default so
		// such files are not rejected.
		scanner.Buffer(make([]byte, 0, 64*1024), 4*1024*1024)
		for scanner.Scan() {
			xmlFile.WriteString(scanner.Text())
		}
		if err := scanner.Err(); err != nil {
			return fields, errors.New("failed to read from docProps/core.xml")
		}
		break // exit loop after finding and reading core.xml
	}
	// Unmarshal the collected XML content into the XMLContent struct.
	if err := xml.Unmarshal([]byte(xmlFile.String()), &fields); err != nil {
		return fields, errors.New("failed to Unmarshal")
	}
	return fields, nil
}
// getContentData extracts the plain-text content of the file via the
// supplied reader and stores it on the Document. It reports true on
// success and passes the reader's error through otherwise.
func getContentData(data *Document, reader DocReader) (bool, error) {
	text, err := reader(data.path)
	if err != nil {
		return false, err
	}
	data.Content = text
	return true, nil
}
// strangeCharsRE matches runs of leftover characters the binary-format
// extractors produce: U+FFFD replacement characters and the 0x13/0x0b
// control bytes. Compiled once at package init instead of on every call.
var strangeCharsRE = regexp.MustCompile("[�\x13\x0b]+")

// removeStrangeChars replaces each run of stray control/replacement
// characters in input with a single space.
func removeStrangeChars(input string) string {
	return strangeCharsRE.ReplaceAllString(input, " ")
}
// docx2txt extracts the plain text of a .docx file: the raw document
// XML is flattened by turning paragraph ends into newlines, stripping
// the remaining tags and decoding HTML entities.
func docx2txt(filename string) (string, error) {
	doc, err := ReadDocxFile(filename)
	if err != nil {
		return "", err
	}
	defer doc.Close()
	text := doc.Editable().GetContent()         // whole docx data as XML-formatted text
	text = PARA_RE.ReplaceAllString(text, "\n") // paragraph ends (</w:p) become newlines
	text = TAG_RE.ReplaceAllString(text, "")    // drop every remaining XML tag
	return html.UnescapeString(text), nil       // decode entities such as &amp;
}
// pptx2txt extracts the plain text of a .pptx file. Each slide's XML is
// flattened (paragraph ends become newlines, tags are stripped, HTML
// entities decoded) and the non-empty slides are joined with newlines.
func pptx2txt(filename string) (string, error) {
	data, err := ReadPowerPoint(filename)
	if err != nil {
		return "", err
	}
	data.DeletePassWord()
	var parts []string
	for _, slide := range data.GetSlidesContent() {
		text := PARA_RE.ReplaceAllString(slide, "\n") // paragraph ends (</w:p) become newlines
		text = TAG_RE.ReplaceAllString(text, "")      // drop every remaining XML tag
		text = html.UnescapeString(text)              // decode entities such as &amp;
		if text != "" {
			parts = append(parts, text)
		}
	}
	// strings.Join replaces the previous per-slide Sprintf concatenation,
	// which was quadratic in the number of slides; the output is identical
	// because empty slides are filtered out before joining.
	return strings.Join(parts, "\n"), nil
}
// xlsx2txt extracts the cell contents of a .xlsx workbook as plain
// text: cells within a row are tab-separated and rows (across all
// sheets) are newline-separated.
func xlsx2txt(filename string) (string, error) {
	wb, err := xlsx.OpenFile(filename)
	if err != nil {
		return "", err
	}
	defer wb.Close()
	// strings.Builder replaces the previous Sprintf-based concatenation,
	// which was quadratic in the number of rows/cells. The separator
	// logic (newline only once some text has accumulated) is preserved.
	var out strings.Builder
	for _, sheet := range wb.Sheets {
		for row := range wb.ReadRows(sheet) {
			cells := make([]string, len(row.Cells))
			for i, cell := range row.Cells {
				cells[i] = cell.Value
			}
			if out.Len() > 0 {
				out.WriteByte('\n')
			}
			out.WriteString(strings.Join(cells, "\t"))
		}
	}
	return out.String(), nil
}
// pdf2txt extracts the plain text of an entire .pdf file.
// BUG(upstream): text from some specific (or really malformed?) pages
// cannot be extracted.
func pdf2txt(filename string) (string, error) {
	f, r, err := pdf.Open(filename)
	if err != nil {
		return "", err
	}
	defer f.Close()
	textReader, err := r.GetPlainText()
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	// The ReadFrom error was previously ignored, silently returning a
	// truncated result on a failed read.
	if _, err := buf.ReadFrom(textReader); err != nil {
		return "", err
	}
	return buf.String(), nil
}
// doc2txt extracts the plain text of a legacy .doc file. DOC2Text
// yields CR-terminated lines; empty lines are dropped and stray control
// characters are scrubbed both per line and over the final result.
func doc2txt(filename string) (string, error) {
	// The open error was previously discarded with `_`, so a missing
	// file handed a nil *os.File to DOC2Text.
	f, err := os.Open(filename)
	if err != nil {
		return "", err
	}
	defer f.Close()
	content, err := DOC2Text(f)
	if err != nil {
		return "", err
	}
	// Guard the type assertion instead of panicking on an unexpected type.
	buf, ok := content.(*bytes.Buffer)
	if !ok {
		return "", errors.New("unexpected DOC2Text result type")
	}
	var out strings.Builder
	for {
		line, err := buf.ReadString('\r')
		if err != nil {
			// Matches the original loop: the trailing partial line (if
			// any) is discarded together with the read error.
			break
		}
		line = strings.Trim(line, " \n\r")
		if line == "" {
			continue
		}
		if out.Len() > 0 {
			out.WriteByte('\n')
		}
		out.WriteString(removeStrangeChars(line))
	}
	return removeStrangeChars(out.String()), nil
}
// ppt2txt extracts the plain text of a legacy .ppt file and scrubs
// stray control characters from the result.
func ppt2txt(filename string) (string, error) {
	f, err := os.Open(filename)
	if err != nil {
		return "", err
	}
	defer f.Close()
	text, err := ExtractPPTText(f)
	if err != nil {
		return "", err
	}
	return removeStrangeChars(text), nil
}
// xls2txt extracts the plain text of a legacy .xls workbook (all
// sheets) and scrubs stray control characters from the result.
func xls2txt(filename string) (string, error) {
	f, err := os.Open(filename)
	if err != nil {
		return "", err
	}
	defer f.Close()
	text, err := XLS2Text(f)
	if err != nil {
		return "", err
	}
	return removeStrangeChars(text), nil
}

201
godo/office/ole2/LICENSE

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

2
godo/office/ole2/README.md

@ -0,0 +1,2 @@
# ole2
Microsoft Compound Document File Format library in Golang

35
godo/office/ole2/dir.go

@ -0,0 +1,35 @@
package ole2
import (
"unicode/utf16"
)
// Directory entry types stored in File.Type.
const (
	EMPTY       = iota // unused directory slot
	USERSTORAGE = iota // storage (directory-like) entry
	USERSTREAM  = iota // stream (file-like) entry
	LOCKBYTES   = iota
	PROPERTY    = iota
	ROOT        = iota // root storage entry
)
// File is one directory entry of an OLE2 compound document.
type File struct {
	NameBts   [32]uint16 // UTF-16 entry name, NUL-terminated
	Bsize     uint16     // name length in bytes, including the terminator
	Type      byte       // EMPTY, USERSTORAGE, USERSTREAM, ...
	Flag      byte
	Left      uint32 // DirID of the left sibling
	Right     uint32 // DirID of the right sibling
	Child     uint32 // DirID of the first child
	Guid      [8]uint16
	Userflags uint32
	Time      [2]uint64
	Sstart    uint32 // SecID of the stream's first sector
	Size      uint32 // stream size in bytes
	Proptype  uint32
}

// Name decodes the entry's UTF-16 name, dropping the trailing NUL.
// An entry with no stored name yields "".
func (d *File) Name() string {
	// Bsize counts bytes including the UTF-16 NUL terminator. Guard
	// Bsize < 2: the unsigned expression Bsize/2-1 would otherwise wrap
	// to 65535 and panic with a slice out-of-range.
	if d.Bsize < 2 {
		return ""
	}
	n := int(d.Bsize)/2 - 1
	// Clamp a corrupt length to the fixed name field.
	if n > len(d.NameBts) {
		n = len(d.NameBts)
	}
	return string(utf16.Decode(d.NameBts[:n]))
}

42
godo/office/ole2/header.go

@ -0,0 +1,42 @@
package ole2
import (
"bytes"
"encoding/binary"
"fmt"
)
// Header is the fixed 512-byte header of an OLE2 compound document.
type Header struct {
	Id           [2]uint32 // magic bytes D0 CF 11 E0 A1 B1 1A E1
	Clid         [4]uint32
	Verminor     uint16
	Verdll       uint16
	Byteorder    uint16 // 0xFFFE marks little-endian
	Lsectorb     uint16
	Lssectorb    uint16
	_            uint16
	_            uint64
	Cfat         uint32      // total number of sectors used for the sector allocation table
	Dirstart     uint32      // SecID of first sector of the directory stream
	_            uint32
	Sectorcutoff uint32      // minimum size of a standard stream
	Sfatstart    uint32      // SecID of first sector of the short-sector allocation table
	Csfat        uint32      // total number of sectors used for the short-sector allocation table
	Difstart     uint32      // SecID of first sector of the master sector allocation table
	Cdif         uint32      // total number of sectors used for the master sector allocation table
	Msat         [109]uint32 // first 109 entries of the master sector allocation table
}

// parseHeader decodes bts as a little-endian OLE2 header and validates
// the magic bytes and the byte-order mark.
func parseHeader(bts []byte) (*Header, error) {
	header := new(Header)
	// The decode error was previously ignored, so a short buffer turned
	// into a zeroed header that failed the magic check with a misleading
	// message; surface the real error instead.
	if err := binary.Read(bytes.NewBuffer(bts), binary.LittleEndian, header); err != nil {
		return nil, fmt.Errorf("reading OLE2 header: %w", err)
	}
	if header.Id[0] != 0xE011CFD0 || header.Id[1] != 0xE11AB1A1 || header.Byteorder != 0xFFFE {
		return nil, fmt.Errorf("not an excel file")
	}
	return header, nil
}

156
godo/office/ole2/ole.go

@ -0,0 +1,156 @@
package ole2
import (
"encoding/binary"
"io"
)
// Special SecID values used in the allocation tables.
var ENDOFCHAIN = uint32(0xFFFFFFFE) // -2: terminates a sector chain
var FREESECT = uint32(0xFFFFFFFF)   // -1: free / unused sector

// Ole is an open OLE2 compound-document container.
// NOTE: other files construct Ole with positional literals, so the
// field order below must not change.
type Ole struct {
	header   *Header
	Lsector  uint32   // standard sector size in bytes
	Lssector uint32   // short sector size in bytes
	SecID    []uint32 // sector allocation table (SAT)
	SSecID   []uint32 // short-sector allocation table (SSAT)
	Files    []File   // directory entries
	reader   io.ReadSeeker
}
// Open reads the 512-byte OLE2 header from reader and builds the sector
// allocation tables. charset is currently unused.
func Open(reader io.ReadSeeker, charset string) (ole *Ole, err error) {
	hbts := make([]byte, 512)
	// io.ReadFull replaces a bare Read whose error and short-read count
	// were previously ignored.
	if _, err := io.ReadFull(reader, hbts); err != nil {
		return nil, err
	}
	header, err := parseHeader(hbts)
	if err != nil {
		return nil, err
	}
	ole = new(Ole)
	ole.reader = reader
	ole.header = header
	ole.Lsector = 512 // TODO: derive from header.Lsectorb
	ole.Lssector = 64 // TODO: derive from header.Lssectorb
	// Preserve the original contract: ole is returned (non-nil) together
	// with any readMSAT error.
	err = ole.readMSAT()
	return ole, err
}
// ListDir decodes the directory stream and returns its entries.
// Decoding stops at the first EMPTY entry or decode error; a clean
// io.EOF is the normal end of the directory and is not reported as an
// error.
func (o *Ole) ListDir() (dir []*File, err error) {
	// Size 0: the reader is bounded by the chain itself, not r.size.
	sector := o.stream_read(o.header.Dirstart, 0)
	dir = make([]*File, 0)
	for {
		d := new(File)
		err = binary.Read(sector, binary.LittleEndian, d)
		if err == nil && d.Type != EMPTY {
			dir = append(dir, d)
		} else {
			break
		}
	}
	// EOF means the whole directory stream was consumed successfully.
	if err == io.EOF && dir != nil {
		return dir, nil
	}
	return
}
// OpenFile returns a reader over the directory entry's stream. Streams
// smaller than the header's cutoff live in the short-sector container
// anchored at the root entry; larger streams use standard sectors.
func (o *Ole) OpenFile(file *File, root *File) io.ReadSeeker {
	if file.Size >= o.header.Sectorcutoff {
		return o.stream_read(file.Sstart, file.Size)
	}
	return o.short_stream_read(file.Sstart, file.Size, root.Sstart)
}
// readMSAT builds the sector allocation table (SecID) and the
// short-sector allocation table (SSecID) from the master sector
// allocation table.
func (o *Ole) readMSAT() error {
	// The header inlines at most 109 MSAT entries.
	count := uint32(109)
	if o.header.Cfat < 109 {
		count = o.header.Cfat
	}
	for i := uint32(0); i < count; i++ {
		sector, err := o.sector_read(o.header.Msat[i])
		if err != nil {
			return err
		}
		o.SecID = append(o.SecID, sector.AllValues(o.Lsector)...)
	}
	// Additional MSAT sectors are chained starting at Difstart; each one
	// lists further SAT sectors plus a trailing next-sector pointer.
	for sid := o.header.Difstart; sid != ENDOFCHAIN; {
		sector, err := o.sector_read(sid)
		if err != nil {
			return err
		}
		for _, satSid := range sector.MsatValues(o.Lsector) {
			satSector, err := o.sector_read(satSid)
			if err != nil {
				return err
			}
			o.SecID = append(o.SecID, satSector.AllValues(o.Lsector)...)
		}
		sid = sector.NextSid(o.Lsector)
	}
	// Walk the short-sector allocation table chain. The previous version
	// reset sid to Sfatstart inside the loop, so it re-read the first
	// SSAT sector Csfat times instead of following the chain.
	sid := o.header.Sfatstart
	for i := uint32(0); i < o.header.Csfat && sid != ENDOFCHAIN; i++ {
		sector, err := o.sector_read(sid)
		if err != nil {
			return err
		}
		o.SSecID = append(o.SSecID, sector.MsatValues(o.Lsector)...)
		sid = sector.NextSid(o.Lsector)
	}
	return nil
}
// stream_read returns a StreamReader over the standard-sector chain
// starting at sid, limited to size bytes.
func (o *Ole) stream_read(sid uint32, size uint32) *StreamReader {
	return &StreamReader{
		sat:              o.SecID,
		start:            sid,
		reader:           o.reader,
		offset_of_sector: sid,
		offset_in_sector: 0,
		size_sector:      o.Lsector,
		size:             int64(size),
		offset:           0,
		sector_pos:       sector_pos,
	}
}
// short_stream_read returns a StreamReader over a short-sector chain.
// Short sectors live inside a standard stream anchored at startSecId,
// so an inner StreamReader over the SAT serves as the byte source for
// the outer reader, which walks the SSAT.
func (o *Ole) short_stream_read(sid uint32, size uint32, startSecId uint32) *StreamReader {
	container := &StreamReader{
		sat:              o.SecID,
		start:            startSecId,
		reader:           o.reader,
		offset_of_sector: sid,
		offset_in_sector: 0,
		size_sector:      o.Lsector,
		size:             int64(uint32(len(o.SSecID)) * o.Lssector),
		offset:           0,
		sector_pos:       sector_pos,
	}
	return &StreamReader{
		sat:              o.SSecID,
		start:            sid,
		reader:           container,
		offset_of_sector: sid,
		offset_in_sector: 0,
		size_sector:      o.Lssector,
		size:             int64(size),
		offset:           0,
		sector_pos:       short_sector_pos,
	}
}
// sector_read reads the standard sector with the given SecID.
func (o *Ole) sector_read(sid uint32) (Sector, error) {
	return o.sector_read_internal(sid, o.Lsector)
}

// short_sector_read reads the short sector with the given SecID.
func (o *Ole) short_sector_read(sid uint32) (Sector, error) {
	return o.sector_read_internal(sid, o.Lssector)
}
// sector_read_internal seeks to sector sid of the given size and
// returns its raw bytes.
func (o *Ole) sector_read_internal(sid, size uint32) (Sector, error) {
	pos := sector_pos(sid, size)
	if _, err := o.reader.Seek(int64(pos), 0); err != nil {
		return nil, err
	}
	bts := make([]byte, size)
	// io.ReadFull replaces a bare Read whose error and short-read count
	// were previously ignored; a truncated file now reports an error
	// instead of yielding a silently zero-padded sector.
	if _, err := io.ReadFull(o.reader, bts); err != nil {
		return nil, err
	}
	return Sector(bts), nil
}
// sector_pos converts a standard SecID into an absolute file offset:
// sector data begins right after the fixed 512-byte header.
func sector_pos(sid uint32, size uint32) uint32 {
	const headerSize = 512
	return headerSize + sid*size
}
// short_sector_pos converts a short SecID into an offset within the
// short-stream container stream (no file header to skip).
func short_sector_pos(sid uint32, size uint32) uint32 {
	return size * sid
}

19
godo/office/ole2/pss.go

@ -0,0 +1,19 @@
package ole2
import ()
// PSS mirrors the on-disk layout of an OLE2 directory entry.
// NOTE(review): this type appears unused within the visible package
// (File in dir.go plays the same role) — confirm before relying on it.
type PSS struct {
	name      [64]byte // entry name
	bsize     uint16   // name length in bytes
	typ       byte     // entry type
	flag      byte
	left      uint32
	right     uint32
	child     uint32
	guid      [16]uint16
	userflags uint32
	time      [2]uint64
	sstart    uint32 // SecID of the stream's first sector
	size      uint32 // stream size in bytes
	_         uint32
}

37
godo/office/ole2/sector.go

@ -0,0 +1,37 @@
package ole2
import (
"bytes"
"encoding/binary"
)
// Sector is the raw byte content of a single sector.
type Sector []byte

// Uint32 decodes the little-endian uint32 at byte offset bit.
func (s *Sector) Uint32(bit uint32) uint32 {
	return binary.LittleEndian.Uint32((*s)[bit : bit+4])
}

// NextSid returns the chain pointer stored in the sector's final 4
// bytes, given the sector size.
func (s *Sector) NextSid(size uint32) uint32 {
	return s.Uint32(size - 4)
}

// MsatValues decodes every uint32 in the sector except the trailing
// chain pointer.
func (s *Sector) MsatValues(size uint32) []uint32 {
	return s.values(size, int(size/4)-1)
}

// AllValues decodes every uint32 in the sector.
func (s *Sector) AllValues(size uint32) []uint32 {
	return s.values(size, int(size/4))
}

// values decodes the first length little-endian uint32 values of the
// sector.
func (s *Sector) values(size uint32, length int) []uint32 {
	out := make([]uint32, length)
	binary.Read(bytes.NewReader(*s), binary.LittleEndian, out)
	return out
}

13
godo/office/ole2/stream.go

@ -0,0 +1,13 @@
package ole2
// Stream describes an open stream within an OLE2 container.
// NOTE(review): this type appears unused within the visible package
// (StreamReader is used for reading) — confirm before relying on it.
type Stream struct {
	Ole     *Ole
	Start   uint32 // first SecID of the stream
	Pos     uint32
	Cfat    int
	Size    int
	Fatpos  uint32
	Bufsize uint32
	Eof     byte
	Sfat    bool // presumably: stream lives in short sectors — confirm
}

96
godo/office/ole2/stream_reader.go

@ -0,0 +1,96 @@
package ole2
import (
"io"
"log"
)
// DEBUG enables verbose logging of sector reads in StreamReader.Read.
var DEBUG = false

// StreamReader presents a chain of (standard or short) sectors as a
// contiguous io.ReadSeeker, hopping from sector to sector via the
// allocation table sat.
type StreamReader struct {
	sat              []uint32 // allocation table: SecID -> next SecID
	start            uint32   // first SecID of the chain
	reader           io.ReadSeeker
	offset_of_sector uint32 // SecID currently being read
	offset_in_sector uint32 // read position inside the current sector
	size_sector      uint32 // sector size in bytes
	size             int64  // logical stream size in bytes
	offset           int64  // logical position within the stream
	sector_pos       func(uint32, uint32) uint32 // SecID -> offset in reader
}
// Read fills p from the sector chain, seeking the underlying reader to
// each sector in turn. It returns io.EOF when the chain ends
// (ENDOFCHAIN) before p is filled.
func (r *StreamReader) Read(p []byte) (n int, err error) {
	if r.offset_of_sector == ENDOFCHAIN {
		return 0, io.EOF
	}
	pos := r.sector_pos(r.offset_of_sector, r.size_sector) + r.offset_in_sector
	r.reader.Seek(int64(pos), 0)
	readed := uint32(0)
	// While the remaining request extends past the current sector, read
	// up to the sector boundary and hop to the next sector in the chain.
	for remainLen := uint32(len(p)) - readed; remainLen > r.size_sector-r.offset_in_sector; remainLen = uint32(len(p)) - readed {
		if n, err := r.reader.Read(p[readed : readed+r.size_sector-r.offset_in_sector]); err != nil {
			return int(readed) + n, err
		} else {
			readed += uint32(n)
			r.offset_in_sector = 0
			// A SecID outside the allocation table means the container is
			// corrupt; bail out rather than index out of range.
			if r.offset_of_sector >= uint32(len(r.sat)) {
				log.Fatal(`
THIS SHOULD NOT HAPPEN, IF YOUR PROGRAM BREAK,
COMMENT THIS LINE TO CONTINUE AND MAIL ME XLS FILE
TO TEST, THANKS`)
				return int(readed), io.EOF
			} else {
				r.offset_of_sector = r.sat[r.offset_of_sector]
			}
			if r.offset_of_sector == ENDOFCHAIN {
				return int(readed), io.EOF
			}
			pos := r.sector_pos(r.offset_of_sector, r.size_sector) + r.offset_in_sector
			r.reader.Seek(int64(pos), 0)
		}
	}
	// Final read: the remainder fits inside the current sector.
	if n, err := r.reader.Read(p[readed:len(p)]); err == nil {
		r.offset_in_sector += uint32(n)
		if DEBUG {
			log.Printf("pos:%x,bit:% X", r.offset_of_sector, p)
		}
		return len(p), nil
	} else {
		return int(readed) + n, err
	}
}
// Seek positions the stream. whence==0 rewinds to the start of the
// chain and walks forward by offset; any other whence value advances
// relative to the current position. io.EOF is returned when the target
// lies beyond the chain or the logical stream size.
func (r *StreamReader) Seek(offset int64, whence int) (offset_result int64, err error) {
	if whence == 0 {
		// Absolute seek: restart from the first sector of the chain.
		r.offset_of_sector = r.start
		r.offset_in_sector = 0
		r.offset = offset
	} else {
		// Relative seek (whence values other than 0 are all treated as
		// "relative to current position").
		r.offset += offset
	}
	if r.offset_of_sector == ENDOFCHAIN {
		return r.offset, io.EOF
	}
	// Hop whole sectors until the remaining offset fits in the current one.
	for offset >= int64(r.size_sector-r.offset_in_sector) {
		r.offset_of_sector = r.sat[r.offset_of_sector]
		offset -= int64(r.size_sector - r.offset_in_sector)
		r.offset_in_sector = 0
		if r.offset_of_sector == ENDOFCHAIN {
			err = io.EOF
			goto return_res
		}
	}
	if r.size <= r.offset {
		// At or past the logical end of the stream: clamp and report EOF.
		err = io.EOF
		r.offset = r.size
	} else {
		r.offset_in_sector += uint32(offset)
	}
return_res:
	offset_result = r.offset
	return
}

75
godo/office/ole2/stream_reader_test.go

@ -0,0 +1,75 @@
package ole2
import (
"bytes"
"fmt"
"testing"
)
// TestRead exercises StreamReader.Read over a synthetic container with
// 8-byte sectors and the chain 0 -> 2 -> ENDOFCHAIN.
func TestRead(t *testing.T) {
	data := make([]byte, 1<<10)
	for i := range data {
		data[i] = byte(i)
	}
	ole := &Ole{nil, 8, 1, []uint32{2, 1, ENDOFCHAIN}, []uint32{}, []File{}, bytes.NewReader(data)}
	r := ole.stream_read(0, 30)
	res := make([]byte, 14)
	fmt.Println(r.Read(res))
	fmt.Println(res)
}
// TestSeek advances the reader by 2 bytes nineteen times, printing each
// result, which walks the position past the 30-byte stream end.
func TestSeek(t *testing.T) {
	data := make([]byte, 1<<10)
	for i := range data {
		data[i] = byte(i)
	}
	ole := &Ole{nil, 8, 1, []uint32{2, 1, ENDOFCHAIN}, []uint32{}, []File{}, bytes.NewReader(data)}
	r := ole.stream_read(0, 30)
	for i := 0; i < 19; i++ {
		fmt.Println(r.Seek(2, 1))
	}
}
// TestSeek1 duplicates TestSeek: nineteen relative 2-byte seeks over
// the same synthetic container.
// NOTE(review): identical to TestSeek — consider removing one of them.
func TestSeek1(t *testing.T) {
	data := make([]byte, 1<<10)
	for i := range data {
		data[i] = byte(i)
	}
	ole := &Ole{nil, 8, 1, []uint32{2, 1, ENDOFCHAIN}, []uint32{}, []File{}, bytes.NewReader(data)}
	r := ole.stream_read(0, 30)
	for i := 0; i < 19; i++ {
		fmt.Println(r.Seek(2, 1))
	}
}

138
godo/office/pdf/README.md

@ -0,0 +1,138 @@
# PDF Reader
[![Built with WeBuild](https://raw.githubusercontent.com/webuild-community/badge/master/svg/WeBuild.svg)](https://webuild.community)
A simple Go library which enables reading PDF files. Forked from https://github.com/rsc/pdf
## Features
- Get plain text content (without format)
- Get Content (including all font and formatting information)
## Install:
`go get -u github.com/ledongthuc/pdf`
## Read plain text
```golang
package main
import (
"bytes"
"fmt"
"github.com/ledongthuc/pdf"
)
func main() {
pdf.DebugOn = true
content, err := readPdf("test.pdf") // Read local pdf file
if err != nil {
panic(err)
}
fmt.Println(content)
return
}
func readPdf(path string) (string, error) {
f, r, err := pdf.Open(path)
// remember close file
defer f.Close()
if err != nil {
return "", err
}
var buf bytes.Buffer
b, err := r.GetPlainText()
if err != nil {
return "", err
}
buf.ReadFrom(b)
return buf.String(), nil
}
```
## Read all text with styles from PDF
```golang
func readPdf2(path string) (string, error) {
f, r, err := pdf.Open(path)
// remember close file
defer f.Close()
if err != nil {
return "", err
}
totalPage := r.NumPage()
for pageIndex := 1; pageIndex <= totalPage; pageIndex++ {
p := r.Page(pageIndex)
if p.V.IsNull() {
continue
}
var lastTextStyle pdf.Text
texts := p.Content().Text
for _, text := range texts {
if isSameSentence(text, lastTextStyle) {
lastTextStyle.S = lastTextStyle.S + text.S
} else {
fmt.Printf("Font: %s, Font-size: %f, x: %f, y: %f, content: %s \n", lastTextStyle.Font, lastTextStyle.FontSize, lastTextStyle.X, lastTextStyle.Y, lastTextStyle.S)
lastTextStyle = text
}
}
}
return "", nil
}
```
## Read text grouped by rows
```golang
package main
import (
"fmt"
"os"
"github.com/ledongthuc/pdf"
)
func main() {
content, err := readPdf(os.Args[1]) // Read local pdf file
if err != nil {
panic(err)
}
fmt.Println(content)
return
}
func readPdf(path string) (string, error) {
f, r, err := pdf.Open(path)
defer func() {
_ = f.Close()
}()
if err != nil {
return "", err
}
totalPage := r.NumPage()
for pageIndex := 1; pageIndex <= totalPage; pageIndex++ {
p := r.Page(pageIndex)
if p.V.IsNull() {
continue
}
rows, _ := p.GetTextByRow()
for _, row := range rows {
println(">>>> row: ", row.Position)
for _, word := range row.Content {
fmt.Println(word.S)
}
}
}
return "", nil
}
```
## Demo
![Run example](https://i.gyazo.com/01fbc539e9872593e0ff6bac7e954e6d.gif)

49
godo/office/pdf/ascii85.go

@ -0,0 +1,49 @@
package pdf
import (
"io"
)
// alphaReader filters an ASCII85-encoded stream: bytes outside the
// ASCII85 alphabet are zeroed and data after a "~>" end marker is
// dropped (see Read).
type alphaReader struct {
	reader io.Reader
}

// newAlphaReader wraps reader in an alphaReader.
func newAlphaReader(reader io.Reader) *alphaReader {
	return &alphaReader{reader: reader}
}
// checkASCII85 classifies a byte: valid ASCII85 characters
// ('!'..'u', i.e. 33..117) are returned unchanged, '~' yields 1 to flag
// a possible end-of-data marker, and everything else yields 0.
func checkASCII85(r byte) byte {
	switch {
	case r >= '!' && r <= 'u':
		return r
	case r == '~':
		return 1
	default:
		return 0
	}
}
// Read reads from the wrapped stream and sanitizes the data in place:
// bytes outside the ASCII85 alphabet become 0, and filtering stops at a
// "~>" end-of-data marker. The byte count of the underlying read is
// returned unchanged.
func (a *alphaReader) Read(p []byte) (int, error) {
	n, err := a.reader.Read(p)
	// The previous version contained an empty `if err == io.EOF {}`
	// branch; EOF needs no special casing and is simply propagated.
	if err != nil {
		return n, err
	}
	buf := make([]byte, n)
	tilda := false
	for i := 0; i < n; i++ {
		char := checkASCII85(p[i])
		if char == '>' && tilda { // "~>" terminates the data
			break
		}
		if char > 1 {
			buf[i] = char
		}
		if char == 1 {
			tilda = true // '~' seen: a following '>' ends the data
		}
	}
	copy(p, buf)
	return n, nil
}

522
godo/office/pdf/lex.go

@ -0,0 +1,522 @@
package pdf
import (
"fmt"
"io"
"strconv"
)
// A token is a PDF token in the input stream, one of the following Go
// types:
//
//	bool, a PDF boolean
//	int64, a PDF integer
//	float64, a PDF real
//	string, a PDF string literal
//	keyword, a PDF keyword
//	name, a PDF name without the leading slash
type token interface{}

// A name is a PDF name, without the leading slash.
type name string

// A keyword is a PDF keyword.
// Delimiter tokens used in higher-level syntax, such as "<<", ">>",
// "[", "]", "{", "}", are also treated as keywords.
type keyword string
// A buffer holds buffered input bytes from the PDF file.
type buffer struct {
	r           io.Reader // source of data
	buf         []byte    // buffered data
	pos         int       // read index in buf
	offset      int64     // offset at end of buf; aka offset of next read
	tmp         []byte    // scratch space for accumulating token
	unread      []token   // queue of read but then unread tokens
	allowEOF    bool      // treat EOF on r as end of input, not an error
	allowObjptr bool
	allowStream bool
	eof         bool // end of input has been reached
	key         []byte // presumably the decryption key — confirm against callers
	useAES      bool
	objptr      objptr
}
// newBuffer returns a new buffer reading from r at the given offset.
func newBuffer(r io.Reader, offset int64) *buffer {
	b := &buffer{
		r:      r,
		offset: offset,
		buf:    make([]byte, 0, 4096),
	}
	b.allowObjptr = true
	b.allowStream = true
	return b
}
// seek repositions the buffer at offset, discarding all buffered bytes
// and any pushed-back tokens.
func (b *buffer) seek(offset int64) {
	b.pos = 0
	b.buf = b.buf[:0]
	b.unread = b.unread[:0]
	b.offset = offset
}
// readByte returns the next input byte, refilling the buffer as needed.
// At end of input it returns '\n' (treated as whitespace by the lexer)
// rather than an error.
func (b *buffer) readByte() byte {
	if b.pos >= len(b.buf) {
		b.reload()
		if b.pos >= len(b.buf) {
			return '\n'
		}
	}
	c := b.buf[b.pos]
	b.pos++
	return c
}
// errorf aborts lexing by panicking with a formatted error; a caller
// higher up is expected to recover it.
func (b *buffer) errorf(format string, args ...interface{}) {
	panic(fmt.Errorf(format, args...))
}
// reload refills the buffer from the underlying reader. It returns
// false at an allowed EOF (setting b.eof) and panics via errorf on any
// other read failure.
func (b *buffer) reload() bool {
	// Read only up to the next capacity boundary so b.offset stays
	// aligned to the buffer capacity.
	n := cap(b.buf) - int(b.offset%int64(cap(b.buf)))
	n, err := b.r.Read(b.buf[:n])
	if n == 0 && err != nil {
		b.buf = b.buf[:0]
		b.pos = 0
		if b.allowEOF && err == io.EOF {
			b.eof = true
			return false
		}
		b.errorf("malformed PDF: reading at offset %d: %v", b.offset, err)
		return false
	}
	b.offset += int64(n)
	b.buf = b.buf[:n]
	b.pos = 0
	return true
}
func (b *buffer) seekForward(offset int64) {
for b.offset < offset {
if !b.reload() {
return
}
}
b.pos = len(b.buf) - int(b.offset-offset)
}
func (b *buffer) readOffset() int64 {
return b.offset - int64(len(b.buf)) + int64(b.pos)
}
func (b *buffer) unreadByte() {
if b.pos > 0 {
b.pos--
}
}
// unreadToken pushes t back so the next readToken call returns it (LIFO).
func (b *buffer) unreadToken(t token) {
	b.unread = append(b.unread, t)
}

// readToken reads and returns the next token from the input stream.
// At end of input (with allowEOF set) it returns io.EOF as the token.
func (b *buffer) readToken() token {
	// Serve any previously unread token first.
	if n := len(b.unread); n > 0 {
		t := b.unread[n-1]
		b.unread = b.unread[:n-1]
		return t
	}
	// Find first non-space, non-comment byte.
	c := b.readByte()
	for {
		if isSpace(c) {
			if b.eof {
				return io.EOF
			}
			c = b.readByte()
		} else if c == '%' {
			// Comments run to end of line.
			for c != '\r' && c != '\n' {
				c = b.readByte()
			}
		} else {
			break
		}
	}
	switch c {
	case '<':
		if b.readByte() == '<' {
			return keyword("<<")
		}
		b.unreadByte()
		return b.readHexString()
	case '(':
		return b.readLiteralString()
	case '[', ']', '{', '}':
		return keyword(string(c))
	case '/':
		return b.readName()
	case '>':
		if b.readByte() == '>' {
			return keyword(">>")
		}
		b.unreadByte()
		fallthrough
	default:
		if isDelim(c) {
			b.errorf("unexpected delimiter %#q", rune(c))
			return nil
		}
		b.unreadByte()
		return b.readKeyword()
	}
}
// readHexString reads a hex string token, after the opening '<' has
// already been consumed, up to the closing '>'. Whitespace before and
// between the digits of a pair is ignored.
func (b *buffer) readHexString() token {
	tmp := b.tmp[:0]
	for {
	Loop:
		c := b.readByte()
		if c == '>' {
			break
		}
		if isSpace(c) {
			goto Loop // skip whitespace before the first digit of a pair
		}
	Loop2:
		c2 := b.readByte()
		if isSpace(c2) {
			goto Loop2 // skip whitespace between the two digits of a pair
		}
		// unhex returns -1 for a non-hex digit; OR-ing keeps the result
		// negative whenever either digit is invalid.
		x := unhex(c)<<4 | unhex(c2)
		if x < 0 {
			b.errorf("malformed hex string %c %c %s", c, c2, b.buf[b.pos:])
			break
		}
		tmp = append(tmp, byte(x))
	}
	b.tmp = tmp // retain the (possibly grown) scratch space for reuse
	return string(tmp)
}
// unhex returns the value of the hexadecimal digit b, or -1 if b is not
// a hex digit.
func unhex(b byte) int {
	if b >= '0' && b <= '9' {
		return int(b - '0')
	}
	if b >= 'a' && b <= 'f' {
		return int(b-'a') + 10
	}
	if b >= 'A' && b <= 'F' {
		return int(b-'A') + 10
	}
	return -1
}
// readLiteralString reads a parenthesized string literal, after the
// opening '(' has been consumed. Nested balanced parentheses are kept
// verbatim; backslash escapes — named escapes, escaped line breaks, and
// one- to three-digit octal escapes — are decoded.
func (b *buffer) readLiteralString() token {
	tmp := b.tmp[:0]
	depth := 1
Loop:
	for !b.eof {
		c := b.readByte()
		switch c {
		default:
			tmp = append(tmp, c)
		case '(':
			depth++
			tmp = append(tmp, c)
		case ')':
			if depth--; depth == 0 {
				break Loop
			}
			tmp = append(tmp, c)
		case '\\':
			switch c = b.readByte(); c {
			default:
				b.errorf("invalid escape sequence \\%c", c)
				tmp = append(tmp, '\\', c)
			case 'n':
				tmp = append(tmp, '\n')
			case 'r':
				tmp = append(tmp, '\r')
			case 'b':
				tmp = append(tmp, '\b')
			case 't':
				tmp = append(tmp, '\t')
			case 'f':
				tmp = append(tmp, '\f')
			case '(', ')', '\\':
				tmp = append(tmp, c)
			case '\r':
				// Escaped line break: \CR or \CRLF contributes nothing.
				if b.readByte() != '\n' {
					b.unreadByte()
				}
				fallthrough
			case '\n':
				// no append — escaped newline is dropped
			case '0', '1', '2', '3', '4', '5', '6', '7':
				// Octal escape: \d, \dd, or \ddd.
				x := int(c - '0')
				for i := 0; i < 2; i++ {
					c = b.readByte()
					if c < '0' || c > '7' {
						b.unreadByte()
						break
					}
					x = x*8 + int(c-'0')
				}
				if x > 255 {
					b.errorf("invalid octal escape \\%03o", x)
				}
				tmp = append(tmp, byte(x))
			}
		}
	}
	b.tmp = tmp
	return string(tmp)
}

// readName reads a name token, after the leading '/' has been consumed,
// up to the next delimiter or whitespace. #xx hex escapes are decoded.
func (b *buffer) readName() token {
	tmp := b.tmp[:0]
	for {
		c := b.readByte()
		if isDelim(c) || isSpace(c) {
			b.unreadByte()
			break
		}
		if c == '#' {
			// Either nibble being invalid (-1) forces x negative.
			x := unhex(b.readByte())<<4 | unhex(b.readByte())
			if x < 0 {
				b.errorf("malformed name")
			}
			tmp = append(tmp, byte(x))
			continue
		}
		tmp = append(tmp, c)
	}
	b.tmp = tmp
	return name(string(tmp))
}
// readKeyword reads a bare keyword token up to the next delimiter or
// whitespace. Keywords that parse as booleans, integers, or reals are
// converted to the corresponding Go value; everything else is returned
// as a keyword.
func (b *buffer) readKeyword() token {
	tmp := b.tmp[:0]
	for {
		c := b.readByte()
		if isDelim(c) || isSpace(c) {
			b.unreadByte()
			break
		}
		tmp = append(tmp, c)
	}
	b.tmp = tmp
	s := string(tmp)
	switch {
	case s == "true":
		return true
	case s == "false":
		return false
	case isInteger(s):
		// isInteger already validated the syntax, so a parse failure
		// here can only be overflow of int64.
		x, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			b.errorf("invalid integer %s", s)
		}
		return x
	case isReal(s):
		x, err := strconv.ParseFloat(s, 64)
		if err != nil {
			b.errorf("invalid real %s", s)
		}
		return x
	}
	return keyword(string(tmp))
}
// isInteger reports whether s matches the PDF integer syntax: an
// optional sign followed by one or more decimal digits.
func isInteger(s string) bool {
	if len(s) > 0 && (s[0] == '+' || s[0] == '-') {
		s = s[1:]
	}
	if s == "" {
		return false
	}
	for _, c := range s {
		if !('0' <= c && c <= '9') {
			return false
		}
	}
	return true
}

// isReal reports whether s matches the PDF real syntax: an optional
// sign followed by decimal digits containing exactly one '.'.
func isReal(s string) bool {
	if len(s) > 0 && (s[0] == '+' || s[0] == '-') {
		s = s[1:]
	}
	if s == "" {
		return false
	}
	dots := 0
	for _, c := range s {
		switch {
		case c == '.':
			dots++
		case c < '0' || c > '9':
			return false
		}
	}
	return dots == 1
}
// An object is a PDF syntax object, one of the following Go types:
//
//	bool, a PDF boolean
//	int64, a PDF integer
//	float64, a PDF real
//	string, a PDF string literal
//	name, a PDF name without the leading slash
//	dict, a PDF dictionary
//	array, a PDF array
//	stream, a PDF stream
//	objptr, a PDF object reference
//	objdef, a PDF object definition
//
// An object may also be nil, to represent the PDF null.
type object interface{}

// A dict is a PDF dictionary, mapping names to objects.
type dict map[name]object

// An array is a PDF array of objects.
type array []object

// A stream is a PDF stream: its dictionary header, the reference of
// the indirect object that owns it, and the file offset of its data.
type stream struct {
	hdr    dict
	ptr    objptr
	offset int64
}

// An objptr is an indirect object reference, "id gen R".
type objptr struct {
	id  uint32
	gen uint16
}

// An objdef is an indirect object definition, "id gen obj ... endobj".
type objdef struct {
	ptr objptr
	obj object
}
// readObject reads a complete PDF object: a simple token, a dictionary,
// an array, an indirect reference ("n g R"), or an indirect object
// definition ("n g obj ... endobj").
func (b *buffer) readObject() object {
	tok := b.readToken()
	if kw, ok := tok.(keyword); ok {
		switch kw {
		case "null":
			return nil
		case "<<":
			return b.readDict()
		case "[":
			return b.readArray()
		}
		b.errorf("unexpected keyword %q parsing object", kw)
		return nil
	}
	// In encrypted documents, string literals inside an indirect object
	// are encrypted with a per-object key.
	if str, ok := tok.(string); ok && b.key != nil && b.objptr.id != 0 {
		tok = decryptString(b.key, b.useAES, b.objptr, str)
	}
	if !b.allowObjptr {
		return tok
	}
	// Two integers (fitting uint32 and uint16) followed by "R" form a
	// reference; followed by "obj" they open an indirect definition.
	// Otherwise the extra tokens are pushed back.
	if t1, ok := tok.(int64); ok && int64(uint32(t1)) == t1 {
		tok2 := b.readToken()
		if t2, ok := tok2.(int64); ok && int64(uint16(t2)) == t2 {
			tok3 := b.readToken()
			switch tok3 {
			case keyword("R"):
				return objptr{uint32(t1), uint16(t2)}
			case keyword("obj"):
				// Track the current object pointer while decoding the
				// body so nested strings decrypt with the right key.
				old := b.objptr
				b.objptr = objptr{uint32(t1), uint16(t2)}
				obj := b.readObject()
				if _, ok := obj.(stream); !ok {
					tok4 := b.readToken()
					if tok4 != keyword("endobj") {
						b.errorf("missing endobj after indirect object definition")
						b.unreadToken(tok4)
					}
				}
				b.objptr = old
				return objdef{objptr{uint32(t1), uint16(t2)}, obj}
			}
			b.unreadToken(tok3)
		}
		b.unreadToken(tok2)
	}
	return tok
}
// readArray reads the remainder of an array, after the opening "[" has
// been consumed, up to the closing "]".
func (b *buffer) readArray() object {
	var x array
	for {
		tok := b.readToken()
		if tok == nil || tok == keyword("]") {
			break
		}
		b.unreadToken(tok)
		x = append(x, b.readObject())
	}
	return x
}

// readDict reads the remainder of a dictionary, after the opening "<<"
// has been consumed. If allowStream is set and the dictionary is
// followed by the "stream" keyword, a stream object recording the
// offset of the raw stream data is returned instead of the plain dict.
func (b *buffer) readDict() object {
	x := make(dict)
	for {
		tok := b.readToken()
		if tok == nil || tok == keyword(">>") {
			break
		}
		n, ok := tok.(name)
		if !ok {
			b.errorf("unexpected non-name key %T(%v) parsing dictionary", tok, tok)
			continue
		}
		x[n] = b.readObject()
	}
	if !b.allowStream {
		return x
	}
	tok := b.readToken()
	if tok != keyword("stream") {
		b.unreadToken(tok)
		return x
	}
	// The stream keyword must be followed by CRLF or a lone LF.
	switch b.readByte() {
	case '\r':
		if b.readByte() != '\n' {
			b.unreadByte()
		}
	case '\n':
		// ok
	default:
		b.errorf("stream keyword not followed by newline")
	}
	return stream{x, b.objptr, b.readOffset()}
}
// isSpace reports whether c is PDF white space: null, tab, line feed,
// form feed, carriage return, or space.
func isSpace(c byte) bool {
	return c == '\x00' || c == '\t' || c == '\n' || c == '\f' || c == '\r' || c == ' '
}

// isDelim reports whether c is a PDF delimiter character.
func isDelim(c byte) bool {
	switch c {
	case '<', '>', '(', ')', '[', ']', '{', '}', '/', '%':
		return true
	default:
		return false
	}
}

4286
godo/office/pdf/name.go

File diff suppressed because it is too large

1047
godo/office/pdf/page.go

File diff suppressed because it is too large

134
godo/office/pdf/ps.go

@ -0,0 +1,134 @@
package pdf
import (
"fmt"
"io"
)
// A Stack represents a stack of values.
type Stack struct {
	stack []Value
}

// Len returns the number of values on the stack.
func (stk *Stack) Len() int {
	return len(stk.stack)
}

// Push places v on top of the stack.
func (stk *Stack) Push(v Value) {
	stk.stack = append(stk.stack, v)
}

// Pop removes and returns the top value. Popping an empty stack
// returns the zero Value.
func (stk *Stack) Pop() Value {
	if len(stk.stack) == 0 {
		return Value{}
	}
	top := len(stk.stack) - 1
	v := stk.stack[top]
	// Clear the vacated slot so the GC can reclaim what v references.
	stk.stack[top] = Value{}
	stk.stack = stk.stack[:top]
	return v
}
// newDict returns a Value wrapping a fresh, empty PDF dictionary.
func newDict() Value {
	return Value{nil, objptr{}, make(dict)}
}
// Interpret interprets the content in a stream as a basic PostScript program,
// pushing values onto a stack and then calling the do function to execute
// operators. The do function may push or pop values from the stack as needed
// to implement op.
//
// Interpret handles the operators "dict", "currentdict", "begin", "end", "def", and "pop" itself.
//
// Interpret is not a full-blown PostScript interpreter. Its job is to handle the
// very limited PostScript found in certain supporting file formats embedded
// in PDF files, such as cmap files that describe the mapping from font code
// points to Unicode code points.
//
// There is no support for executable blocks, among other limitations.
func Interpret(strm Value, do func(stk *Stack, op string)) {
	rd := strm.Reader()
	b := newBuffer(rd, 0)
	b.allowEOF = true
	b.allowObjptr = false
	b.allowStream = false
	var stk Stack
	var dicts []dict
Reading:
	for {
		tok := b.readToken()
		if tok == io.EOF {
			break
		}
		if kw, ok := tok.(keyword); ok {
			switch kw {
			case "null", "[", "]", "<<", ">>":
				// Structural keywords: fall through to readObject below,
				// which knows how to assemble them into values.
				break
			default:
				// Resolve the name against the open dictionaries,
				// innermost first; otherwise treat it as an operator.
				for i := len(dicts) - 1; i >= 0; i-- {
					if v, ok := dicts[i][name(kw)]; ok {
						stk.Push(Value{nil, objptr{}, v})
						continue Reading
					}
				}
				do(&stk, string(kw))
				continue
			case "dict":
				stk.Pop() // discard the capacity hint operand
				stk.Push(Value{nil, objptr{}, make(dict)})
				continue
			case "currentdict":
				if len(dicts) == 0 {
					panic("no current dictionary")
				}
				stk.Push(Value{nil, objptr{}, dicts[len(dicts)-1]})
				continue
			case "begin":
				// Open the dictionary on top of the stack.
				d := stk.Pop()
				if d.Kind() != Dict {
					panic("cannot begin non-dict")
				}
				dicts = append(dicts, d.data.(dict))
				continue
			case "end":
				if len(dicts) <= 0 {
					panic("mismatched begin/end")
				}
				dicts = dicts[:len(dicts)-1]
				continue
			case "def":
				// Define key -> value in the innermost open dictionary.
				if len(dicts) <= 0 {
					panic("def without open dict")
				}
				val := stk.Pop()
				key, ok := stk.Pop().data.(name)
				if !ok {
					panic("def of non-name")
				}
				dicts[len(dicts)-1][key] = val.data
				continue
			case "pop":
				stk.Pop()
				continue
			}
		}
		// Not an operator: parse a full object and push it.
		b.unreadToken(tok)
		obj := b.readObject()
		stk.Push(Value{nil, objptr{}, obj})
	}
}
type seqReader struct {
rd io.Reader
offset int64
}
func (r *seqReader) ReadAt(buf []byte, offset int64) (int, error) {
if offset != r.offset {
return 0, fmt.Errorf("non-sequential read of stream")
}
n, err := io.ReadFull(r.rd, buf)
r.offset += int64(n)
return n, err
}

1111
godo/office/pdf/read.go

File diff suppressed because it is too large

154
godo/office/pdf/text.go

@ -0,0 +1,154 @@
package pdf
import (
"unicode"
"unicode/utf16"
)
// noRune marks bytes that have no mapping in pdfDocEncoding.
const noRune = unicode.ReplacementChar

// isPDFDocEncoded reports whether s can be interpreted as PDFDocEncoded
// text: it is not UTF-16 and every byte has a mapping in pdfDocEncoding.
func isPDFDocEncoded(s string) bool {
	if isUTF16(s) {
		return false
	}
	for i := 0; i < len(s); i++ {
		if pdfDocEncoding[s[i]] == noRune {
			return false
		}
	}
	return true
}

// pdfDocDecode converts a PDFDocEncoded string to UTF-8. Strings whose
// bytes all map to themselves are returned unchanged, avoiding an
// allocation.
func pdfDocDecode(s string) string {
	for i := 0; i < len(s); i++ {
		if s[i] >= 0x80 || pdfDocEncoding[s[i]] != rune(s[i]) {
			goto Decode
		}
	}
	return s
Decode:
	r := make([]rune, len(s))
	for i := 0; i < len(s); i++ {
		r[i] = pdfDocEncoding[s[i]]
	}
	return string(r)
}
// isUTF16 reports whether s begins with a UTF-16 big-endian byte order
// mark (0xFE 0xFF) and has an even length — the form PDF uses for
// Unicode text strings.
func isUTF16(s string) bool {
	return len(s) >= 2 && s[0] == 0xfe && s[1] == 0xff && len(s)%2 == 0
}

// utf16Decode decodes a big-endian UTF-16 byte string into UTF-8.
// A trailing odd byte (malformed input) is ignored; previously it
// caused an out-of-range panic on s[i+1].
func utf16Decode(s string) string {
	var u []uint16
	for i := 0; i+1 < len(s); i += 2 {
		u = append(u, uint16(s[i])<<8|uint16(s[i+1]))
	}
	return string(utf16.Decode(u))
}
// pdfDocEncoding maps each PDFDocEncoding byte to its Unicode code
// point; noRune marks unassigned bytes.
// See PDF 32000-1:2008, Table D.2.
var pdfDocEncoding = [256]rune{
	noRune, noRune, noRune, noRune, noRune, noRune, noRune, noRune,
	noRune, 0x0009, 0x000a, noRune, noRune, 0x000d, noRune, noRune,
	noRune, noRune, noRune, noRune, noRune, noRune, noRune, noRune,
	0x02d8, 0x02c7, 0x02c6, 0x02d9, 0x02dd, 0x02db, 0x02da, 0x02dc,
	0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
	0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
	0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
	0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
	0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
	0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
	0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
	0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
	0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
	0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
	0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
	0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, noRune,
	0x2022, 0x2020, 0x2021, 0x2026, 0x2014, 0x2013, 0x0192, 0x2044,
	0x2039, 0x203a, 0x2212, 0x2030, 0x201e, 0x201c, 0x201d, 0x2018,
	0x2019, 0x201a, 0x2122, 0xfb01, 0xfb02, 0x0141, 0x0152, 0x0160,
	0x0178, 0x017d, 0x0131, 0x0142, 0x0153, 0x0161, 0x017e, noRune,
	0x20ac, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7,
	0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, noRune, 0x00ae, 0x00af,
	0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7,
	0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
	0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
	0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
	0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
	0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
	0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7,
	0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
	0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
	0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff,
}

// winAnsiEncoding maps each WinAnsiEncoding (Windows code page 1252)
// byte to its Unicode code point; noRune marks unassigned bytes.
var winAnsiEncoding = [256]rune{
	0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
	0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
	0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
	0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
	0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
	0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
	0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
	0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
	0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
	0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
	0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
	0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
	0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
	0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
	0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
	0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
	0x20ac, noRune, 0x201a, 0x0192, 0x201e, 0x2026, 0x2020, 0x2021,
	0x02c6, 0x2030, 0x0160, 0x2039, 0x0152, noRune, 0x017d, noRune,
	noRune, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014,
	0x02dc, 0x2122, 0x0161, 0x203a, 0x0153, noRune, 0x017e, 0x0178,
	0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7,
	0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
	0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7,
	0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
	0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
	0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
	0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
	0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
	0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7,
	0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
	0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
	0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff,
}

// macRomanEncoding maps each MacRomanEncoding byte to its Unicode code
// point.
var macRomanEncoding = [256]rune{
	0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
	0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
	0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
	0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
	0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
	0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
	0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
	0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
	0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
	0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
	0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
	0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
	0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
	0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
	0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
	0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
	0x00c4, 0x00c5, 0x00c7, 0x00c9, 0x00d1, 0x00d6, 0x00dc, 0x00e1,
	0x00e0, 0x00e2, 0x00e4, 0x00e3, 0x00e5, 0x00e7, 0x00e9, 0x00e8,
	0x00ea, 0x00eb, 0x00ed, 0x00ec, 0x00ee, 0x00ef, 0x00f1, 0x00f3,
	0x00f2, 0x00f4, 0x00f6, 0x00f5, 0x00fa, 0x00f9, 0x00fb, 0x00fc,
	0x2020, 0x00b0, 0x00a2, 0x00a3, 0x00a7, 0x2022, 0x00b6, 0x00df,
	0x00ae, 0x00a9, 0x2122, 0x00b4, 0x00a8, 0x2260, 0x00c6, 0x00d8,
	0x221e, 0x00b1, 0x2264, 0x2265, 0x00a5, 0x00b5, 0x2202, 0x2211,
	0x220f, 0x03c0, 0x222b, 0x00aa, 0x00ba, 0x03a9, 0x00e6, 0x00f8,
	0x00bf, 0x00a1, 0x00ac, 0x221a, 0x0192, 0x2248, 0x2206, 0x00ab,
	0x00bb, 0x2026, 0x00a0, 0x00c0, 0x00c3, 0x00d5, 0x0152, 0x0153,
	0x2013, 0x2014, 0x201c, 0x201d, 0x2018, 0x2019, 0x00f7, 0x25ca,
	0x00ff, 0x0178, 0x2044, 0x20ac, 0x2039, 0x203a, 0xfb01, 0xfb02,
	0x2021, 0x00b7, 0x201a, 0x201e, 0x2030, 0x00c2, 0x00ca, 0x00c1,
	0x00cb, 0x00c8, 0x00cd, 0x00ce, 0x00cf, 0x00cc, 0x00d3, 0x00d4,
	0xf8ff, 0x00d2, 0x00da, 0x00db, 0x00d9, 0x0131, 0x02c6, 0x02dc,
	0x00af, 0x02d8, 0x02d9, 0x02da, 0x00b8, 0x02dd, 0x02db, 0x02c7,
}

358
godo/office/ppt.go

@ -0,0 +1,358 @@
package office
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"strings"
"github.com/richardlehane/mscfb"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/transform"
)
// slideSkippedRecordsTypes lists metadata or non-readable records that
// readSlides skips inside a DocumentContainer before reading the
// SlideListWithText container that carries the slide text.
// NOTE(review): recordTypeSlideListWithText itself appears here —
// presumably to skip the master list that precedes the slide list —
// and recordTypeHeadersFooters appears twice, presumably because a
// DocumentContainer can hold two HeadersFooters containers; confirm
// both against [MS-PPT] 2.4.1 before changing.
var slideSkippedRecordsTypes = []recordType{
	recordTypeExternalObjectList,
	recordTypeEnvironment,
	recordTypeSoundCollection,
	recordTypeDrawingGroup,
	recordTypeSlideListWithText,
	recordTypeList,
	recordTypeHeadersFooters,
	recordTypeHeadersFooters,
}

// drawingSkippedRecordsTypes lists metadata or non-readable records
// skipped inside a slide container before its Drawing record.
var drawingSkippedRecordsTypes = []recordType{
	recordTypeSlideShowSlideInfoAtom,
	recordTypeHeadersFooters,
	recordTypeRoundTripSlideSyncInfo12,
}

const (
	// userPersistIDRefOffset is the byte offset of the docPersistIdRef
	// field within a UserEditAtom record.
	userPersistIDRefOffset = 16
)
// ExtractPPTText parses the legacy binary PPT file represented by
// Reader r and extracts all slide text from it.
func ExtractPPTText(r io.Reader) (string, error) {
	ra := ToReaderAt(r)
	d, err := mscfb.New(ra)
	if err != nil {
		return "", err
	}
	currentUser, pptDocument := getCurrentUserAndPPTDoc(d)
	if err := isValidPPT(currentUser, pptDocument); err != nil {
		return "", err
	}
	// Walk the UserEditAtom chain to find the persist directories.
	offsetPersistDirectory, liveRecord, err := getUserEditAtomsData(currentUser, pptDocument)
	if err != nil {
		return "", err
	}
	persistDirEntries, err := getPersistDirectoryEntries(pptDocument, offsetPersistDirectory)
	if err != nil {
		return "", err
	}
	// get DocumentContainer reference
	docPersistIDRef := liveRecord.LongAt(userPersistIDRefOffset)
	documentContainer, err := readRecord(pptDocument, persistDirEntries[docPersistIDRef], recordTypeDocument)
	if err != nil {
		return "", err
	}
	return readSlides(documentContainer, pptDocument, persistDirEntries)
}

// getCurrentUserAndPPTDoc extracts the "Current User" and
// "PowerPoint Document" streams from the compound file.
// Either result may be nil if the stream is absent.
func getCurrentUserAndPPTDoc(r *mscfb.Reader) (currentUser *mscfb.File, pptDocument *mscfb.File) {
	for _, f := range r.File {
		switch f.Name {
		case "Current User":
			currentUser = f
		case "PowerPoint Document":
			pptDocument = f
		}
	}
	return currentUser, pptDocument
}
// isValidPPT verifies that the compound file holds both the
// "Current User" and "PowerPoint Document" streams and that the
// CurrentUserAtom carries a recognized header token.
func isValidPPT(currentUser, pptDocument *mscfb.File) error {
	const (
		headerTokenOffset      = 12
		encryptedDocumentToken = 0xF3D1C4DF
		plainDocumentToken     = 0xE391C05F
	)
	if currentUser == nil || pptDocument == nil {
		return fmt.Errorf(".ppt file must contain \"Current User\" and \"PowerPoint Document\" streams")
	}
	var raw [4]byte
	if _, err := currentUser.ReadAt(raw[:], headerTokenOffset); err != nil {
		return err
	}
	switch tok := binary.LittleEndian.Uint32(raw[:]); tok {
	case plainDocumentToken, encryptedDocumentToken:
		return nil
	default:
		return fmt.Errorf("invalid UserEditAtom header token %X", tok)
	}
}
// getUserEditAtomsData walks the chain of UserEditAtom records, starting
// from the offset stored in the CurrentUserAtom, and collects each
// persist directory offset (newest first), according to section 2.1.2
// of the specification (https://msopenspecs.azureedge.net/files/MS-PPT/%5bMS-PPT%5d-210422.pdf).
// It also returns the last successfully read ("live") UserEditAtom.
// If the chain is broken by a record-type mismatch, the walk stops and
// that mismatch error is returned.
func getUserEditAtomsData(currentUser, pptDocument *mscfb.File) (
	persistDirectoryOffsets []int64,
	liveRecord record,
	err error,
) {
	const (
		offsetLastEditInitialPosition  = 16
		offsetLastEditPosition         = 8
		persistDirectoryOffsetPosition = 12
	)
	var b [4]byte
	_, err = currentUser.ReadAt(b[:], offsetLastEditInitialPosition)
	if err != nil {
		return nil, record{}, err
	}
	offsetLastEdit := binary.LittleEndian.Uint32(b[:])
	for {
		liveRecord, err = readRecord(pptDocument, int64(offsetLastEdit), recordTypeUserEditAtom)
		if err != nil {
			if errors.Is(err, errMismatchRecordType) {
				break
			}
			return nil, record{}, err
		}
		persistDirectoryOffsets = append(
			persistDirectoryOffsets,
			int64(liveRecord.LongAt(persistDirectoryOffsetPosition)),
		)
		// Follow the link to the previous UserEditAtom; offset 0 marks
		// the oldest edit.
		offsetLastEdit = liveRecord.LongAt(offsetLastEditPosition)
		if offsetLastEdit == 0 {
			break
		}
	}
	return persistDirectoryOffsets, liveRecord, err
}

// getPersistDirectoryEntries reads the PersistDirectoryAtom at each
// offset and builds the mapping from persist object identifiers to
// stream offsets, according to section 2.1.2 of the specification
// (https://msopenspecs.azureedge.net/files/MS-PPT/%5bMS-PPT%5d-210422.pdf).
// offsets are collected newest-first, so iterating backwards applies
// the oldest directory first and lets newer entries overwrite it.
func getPersistDirectoryEntries(pptDocument *mscfb.File, offsets []int64) (map[uint32]int64, error) {
	const persistOffsetEntrySize = 4
	persistDirEntries := make(map[uint32]int64)
	for i := len(offsets) - 1; i >= 0; i-- {
		rgPersistDirEntry, err := readRecord(pptDocument, offsets[i], recordTypePersistDirectoryAtom)
		if err != nil {
			return nil, err
		}
		rgPersistDirEntryData := rgPersistDirEntry.recordData
		for j := 0; j < len(rgPersistDirEntryData); {
			// Each directory entry packs persistId (low 20 bits) and
			// cPersist (high 12 bits), followed by cPersist offsets.
			persist := rgPersistDirEntryData.LongAt(j)
			persistID := persist & 0x000FFFFF
			cPersist := ((persist & 0xFFF00000) >> 20) & 0x00000FFF
			j += 4
			for k := uint32(0); k < cPersist; k++ {
				persistDirEntries[persistID+k] = int64(rgPersistDirEntryData.LongAt(j + int(k)*persistOffsetEntrySize))
			}
			j += int(cPersist * persistOffsetEntrySize)
		}
	}
	return persistDirEntries, nil
}
// readSlides extracts the text of every slide referenced by the given
// DocumentContainer, concatenated with space separators.
func readSlides(documentContainer, pptDocument io.ReaderAt, persistDirEntries map[uint32]int64) (string, error) {
	const slideSkipInitialOffset = 48
	// Skip metadata records preceding the slide list.
	offset, err := skipRecords(documentContainer, slideSkipInitialOffset, slideSkippedRecordsTypes)
	if err != nil {
		return "", err
	}
	slideList, err := readRecord(documentContainer, offset, recordTypeSlideListWithText)
	if err != nil {
		return "", err
	}
	utf16Decoder := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM).NewDecoder()
	var out strings.Builder
	// Walk the children of the SlideListWithText container; each child
	// occupies its data length plus an 8-byte record header.
	n := len(slideList.Data())
	for i := 0; i < n; {
		block, err := readRecord(slideList, int64(i), recordTypeUnspecified)
		if err != nil {
			return "", err
		}
		switch block.Type() {
		case recordTypeSlidePersistAtom:
			err = readTextFromSlidePersistAtom(block, pptDocument, persistDirEntries, &out, utf16Decoder)
		case recordTypeTextCharsAtom:
			err = readTextFromTextCharsAtom(block, &out, utf16Decoder)
		case recordTypeTextBytesAtom:
			err = readTextFromTextBytesAtom(block, &out, utf16Decoder)
		}
		if err != nil {
			return "", err
		}
		i += len(block.Data()) + 8
	}
	return out.String(), nil
}
// readTextFromSlidePersistAtom resolves the slide referenced by a
// SlidePersistAtom through the persist directory and extracts the text
// of its drawing. Rather than fully parsing the binary drawing
// container, it scans the raw bytes for candidate TextCharsAtom /
// TextBytesAtom headers and decodes each verified match into out.
func readTextFromSlidePersistAtom(
	block record,
	pptDocument io.ReaderAt,
	persistDirEntries map[uint32]int64,
	out *strings.Builder,
	utf16Decoder *encoding.Decoder,
) error {
	const (
		slidePersistAtomSkipInitialOffset = 32
		headerRecordTypeOffset            = 2
	)
	persistDirID := block.LongAt(0)
	// extract slide from persist directory
	slide, err := readRecord(pptDocument, persistDirEntries[persistDirID], recordTypeSlide)
	if err != nil {
		return err
	}
	// skip metadata
	offset, err := skipRecords(slide, slidePersistAtomSkipInitialOffset, drawingSkippedRecordsTypes)
	if err != nil {
		return err
	}
	drawing, err := readRecord(slide, offset, recordTypeDrawing)
	if err != nil {
		return err
	}
	drawingBytes := drawing.Data()
	from := 0
	for {
		// instead of parsing binary PPT format, search text records directly
		pocketIdx := matchPocket(drawingBytes, from)
		if pocketIdx == -1 {
			break
		}
		// check if it is really a text record - recordType bytes must be preceded by 1-byte version and 3-byte instance
		// fields with zero values
		if pocketIdx >= 2 && bytes.Equal(drawingBytes[pocketIdx-headerRecordTypeOffset:pocketIdx], []byte{0x00, 0x00}) {
			var rec record
			if drawingBytes[pocketIdx] == recordTypeTextBytesAtom.LowerPart() {
				rec, err = readRecord(drawing, int64(pocketIdx-headerRecordTypeOffset), recordTypeTextBytesAtom)
				if err != nil {
					return err
				}
				err = readTextFromTextBytesAtom(rec, out, utf16Decoder)
			} else {
				rec, err = readRecord(drawing, int64(pocketIdx-headerRecordTypeOffset), recordTypeTextCharsAtom)
				if err != nil {
					return err
				}
				err = readTextFromTextCharsAtom(rec, out, utf16Decoder)
			}
			if err != nil {
				return err
			}
		}
		// Resume the scan just past the matched type/flag byte pair.
		from = pocketIdx + 2
	}
	return nil
}
// matchPocket scans data starting at index from and returns the
// absolute index of the first byte that equals the low byte of the
// TextCharsAtom or TextBytesAtom record type and is followed by 0x0F,
// or -1 if no such position exists.
func matchPocket(data []byte, from int) int {
	for i := from; i+1 < len(data); i++ {
		b := data[i]
		if b != recordTypeTextCharsAtom.LowerPart() && b != recordTypeTextBytesAtom.LowerPart() {
			continue
		}
		if data[i+1] == 0x0F {
			return i
		}
	}
	return -1
}
// readTextFromTextCharsAtom decodes a TextCharsAtom (UTF-16LE payload)
// into UTF-8 and appends the text plus a separating space to out.
func readTextFromTextCharsAtom(atom record, out *strings.Builder, dec *encoding.Decoder) error {
	dec.Reset()
	text, err := dec.Bytes(atom.Data())
	if err != nil {
		return err
	}
	out.Write(text)
	out.WriteByte(' ')
	return nil
}

// readTextFromTextBytesAtom decodes a TextBytesAtom (the low bytes of
// UTF-16 characters) into UTF-8 and appends the text plus a separating
// space to out.
func readTextFromTextBytesAtom(atom record, out *strings.Builder, dec *encoding.Decoder) error {
	dec.Reset()
	text, err := decodeTextBytesAtom(atom.Data(), dec)
	if err != nil {
		return err
	}
	out.Write(text)
	out.WriteByte(' ')
	return nil
}

// decodeTextBytesAtom widens each byte of data to a full UTF-16LE code
// unit (high byte zero) and converts the sequence to UTF-8.
func decodeTextBytesAtom(data []byte, dec *encoding.Decoder) ([]byte, error) {
	result := make([]byte, 0, len(data))
	for _, lo := range data {
		// Each source byte is the low half of a UTF-16LE unit; the
		// high half is always zero.
		pair := [2]byte{lo, 0}
		var err error
		result, _, err = transform.Append(dec, result, pair[:])
		if err != nil {
			return nil, err
		}
	}
	return result, nil
}
// skipRecords reads record headers starting at initialOffset and skips
// the data of records whose types appear, in order, in
// skippedRecordsTypes. A type that is not present at the current
// position (errMismatchRecordType) is simply not skipped; any other
// read error aborts. It returns the offset of the first record past
// the skipped ones.
func skipRecords(r io.ReaderAt, initialOffset int64, skippedRecordsTypes []recordType) (int64, error) {
	offset := initialOffset
	for i := range skippedRecordsTypes {
		rec, err := readRecordHeaderOnly(r, offset, skippedRecordsTypes[i])
		if err != nil {
			if errors.Is(err, errMismatchRecordType) {
				continue
			}
			return 0, err
		}
		// Advance past this record's header and payload.
		offset += int64(rec.Length() + headerSize)
	}
	return offset, nil
}

208
godo/office/pptx.go

@ -0,0 +1,208 @@
package office
import (
"archive/zip"
"bytes"
"fmt"
"io"
"os"
"regexp"
"strconv"
"strings"
)
// PowerPoint is an in-memory, editable view of a .pptx archive.
// XML parts are kept as raw strings keyed by their path inside the
// archive; Files retains the original zip entries so untouched parts
// can be copied through when the archive is written back out.
type PowerPoint struct {
	Files        []*zip.File       // all entries of the source archive
	Slides       map[string]string // "ppt/slides/slideN.xml" -> slide XML
	NotesSlides  map[string]string // "ppt/notesSlides/notesSlideN.xml" -> notes XML
	Themes       map[string]string // "ppt/theme/themeN.xml" -> theme XML
	Images       map[string]string // "ppt/media/imageN.*" -> raw image bytes as string
	Presentation string            // contents of ppt/presentation.xml
}
// ReadPowerPoint opens the .pptx archive at path and loads its editable
// parts (slides, notes slides, themes, images, presentation.xml) into
// memory.
//
// The zip reader is intentionally not closed: the returned PowerPoint
// keeps the archive's entries in Files so untouched parts can be
// streamed back out later by Write.
//
// Fixes over the previous version: the open error is wrapped with %w
// instead of being concatenated into a non-constant format string (a
// go vet violation), per-entry Open/read errors are reported instead
// of being ignored (a failed Open previously caused a nil-reader
// panic), and every opened entry reader is closed.
func ReadPowerPoint(path string) (*PowerPoint, error) {
	p := &PowerPoint{
		Slides:      make(map[string]string),
		NotesSlides: make(map[string]string),
		Themes:      make(map[string]string),
		Images:      make(map[string]string),
	}
	f, err := zip.OpenReader(path)
	if err != nil {
		return nil, fmt.Errorf("opening pptx file: %w", err)
	}
	p.Files = f.File
	// readPart reads one archive entry fully and closes its reader.
	readPart := func(zf *zip.File) (string, error) {
		rc, err := zf.Open()
		if err != nil {
			return "", fmt.Errorf("opening %s in archive: %w", zf.Name, err)
		}
		defer rc.Close()
		data, err := io.ReadAll(rc)
		if err != nil {
			return "", fmt.Errorf("reading %s: %w", zf.Name, err)
		}
		return string(data), nil
	}
	for _, file := range p.Files {
		var dst map[string]string
		switch {
		case strings.Contains(file.Name, "ppt/slides/slide"):
			dst = p.Slides
		case strings.Contains(file.Name, "ppt/notesSlides/notesSlide"):
			dst = p.NotesSlides
		case strings.Contains(file.Name, "ppt/theme/theme"):
			dst = p.Themes
		case strings.Contains(file.Name, "ppt/media/image"):
			dst = p.Images
		case strings.Contains(file.Name, "ppt/presentation.xml"):
			content, err := readPart(file)
			if err != nil {
				return nil, err
			}
			p.Presentation = content
			continue
		default:
			continue
		}
		content, err := readPart(file)
		if err != nil {
			return nil, err
		}
		dst[file.Name] = content
	}
	return p, nil
}
// GetSlidesContent returns the raw XML of every slide. The order is
// unspecified because slides are stored in a map.
func (p *PowerPoint) GetSlidesContent() []string {
	var contents []string
	for _, xml := range p.Slides {
		contents = append(contents, xml)
	}
	return contents
}
// modifyVerifierRe matches the <p:modifyVerifier .../> element that
// stores the presentation's modify-password hash. Compiled once at
// package level instead of on every call.
var modifyVerifierRe = regexp.MustCompile("<p:modifyVerifier(.*?)/>")

// DeletePassWord strips the edit-protection password from the
// presentation XML. It can only remove the modify (text-editing)
// password, not an open/encryption password.
func (p *PowerPoint) DeletePassWord() {
	p.Presentation = modifyVerifierRe.ReplaceAllString(p.Presentation, "")
}
// GetSlideCount returns the number of slides in the presentation.
func (p *PowerPoint) GetSlideCount() int {
	return len(p.Slides)
}

// GetNotesSlideCount returns the number of notes slides.
func (p *PowerPoint) GetNotesSlideCount() int {
	return len(p.NotesSlides)
}

// GetThemeCount returns the number of themes.
func (p *PowerPoint) GetThemeCount() int {
	return len(p.Themes)
}

// slideNumberRe extracts the first run of digits from a slide path
// such as "ppt/slides/slide12.xml". Compiled once at package level
// instead of on every FindSlideString call.
var slideNumberRe = regexp.MustCompile(`\d+`)

// FindSlideString returns the numbers of the slides whose XML contains
// findString. The order is unspecified (map iteration).
func (p *PowerPoint) FindSlideString(findString string) []int {
	var nums []int
	for path, xml := range p.Slides {
		if !strings.Contains(xml, findString) {
			continue
		}
		// A key with no digits previously appended a bogus 0 (Atoi("")
		// error was ignored); now such keys are skipped. The Atoi error
		// can otherwise be ignored: the regexp guarantees digits-only
		// input.
		if digits := slideNumberRe.FindString(path); digits != "" {
			n, _ := strconv.Atoi(digits)
			nums = append(nums, n)
		}
	}
	return nums
}
// DeleteSlide blanks the slide at the given 1-based index and then
// bubbles it to the last position by swapping it forward through the
// numbered map keys. An index <= 0 selects the last slide. It returns
// an error only when index exceeds the slide count.
func (p *PowerPoint) DeleteSlide(index int) error {
	if index <= 0 {
		index = len(p.Slides)
	}
	if index > len(p.Slides) {
		return fmt.Errorf("index out of range")
	}
	// Replace the slide's XML with a single space rather than deleting
	// the entry (see the note below).
	p.Slides[fmt.Sprintf("ppt/slides/slide%d.xml", index)] = " "
	for {
		if index == len(p.Slides) {
			break
		} else {
			// Swap the blanked slide with its successor, moving it one
			// position toward the end.
			p.Slides[fmt.Sprintf("ppt/slides/slide%d.xml", index)], p.Slides[fmt.Sprintf("ppt/slides/slide%d.xml", index+1)] = p.Slides[fmt.Sprintf("ppt/slides/slide%d.xml", index+1)], p.Slides[fmt.Sprintf("ppt/slides/slide%d.xml", index)]
			index++
		}
	}
	// Deleting the page outright from p.Slides, p.NotesSlides, and
	// p.Files produced an archive that PowerPoint could not open, so
	// for now the deleted page is only blanked and moved to the last
	// position. Previous attempt kept for reference:
	//delete(p.Slides, fmt.Sprintf("ppt/slides/slide%d.xml", len(p.Slides)))
	//delete(p.NotesSlides, fmt.Sprintf("ppt/notesSlides/notesSlide%d.xml", len(p.NotesSlides)))
	//for k, v := range p.Files {
	//	if strings.Contains(v.Name, fmt.Sprintf("ppt/slides/slide%d.xml", len(p.Slides)+1)) {
	//		p.Files = append(p.Files[:k], p.Files[k+1:]...)
	//	}
	//	if strings.Contains(v.Name, fmt.Sprintf("ppt/notesSlides/notesSlide%d.xml", len(p.NotesSlides)+1)) {
	//		p.Files = append(p.Files[:k], p.Files[k+1:]...)
	//	}
	//}
	return nil
}
// ReplaceSlideContent replaces oldString with newString in every slide.
// num bounds the replacements per slide (strings.Replace semantics:
// num < 0 replaces all occurrences).
func (p *PowerPoint) ReplaceSlideContent(oldString string, newString string, num int) {
	for k, v := range p.Slides {
		p.Slides[k] = strings.Replace(v, oldString, newString, num)
	}
}

// ReplaceNotesSlideContent replaces oldString with newString in every
// notes slide, at most num times per notes slide (num < 0 means all).
func (p *PowerPoint) ReplaceNotesSlideContent(oldString string, newString string, num int) {
	for k, v := range p.NotesSlides {
		p.NotesSlides[k] = strings.Replace(v, oldString, newString, num)
	}
}

// ReplaceThemeName replaces oldString with newString in every theme,
// at most num times per theme (num < 0 means all).
func (p *PowerPoint) ReplaceThemeName(oldString string, newString string, num int) {
	for k, v := range p.Themes {
		p.Themes[k] = strings.Replace(v, oldString, newString, num)
	}
}
// ReplaceImage replaces the image stored as "ppt/media/image<index>.*"
// with the contents of the file at newImagePath. It returns an error
// when index is out of range or the replacement file cannot be read;
// previously a read failure was ignored and silently blanked the image.
func (p *PowerPoint) ReplaceImage(newImagePath string, index int) error {
	if index > len(p.Images) {
		return fmt.Errorf("index out of range")
	}
	newImage, err := os.ReadFile(newImagePath)
	if err != nil {
		return fmt.Errorf("reading replacement image: %w", err)
	}
	newImageStr := string(newImage)
	prefix := fmt.Sprintf("ppt/media/image%d.", index)
	for k := range p.Images {
		if strings.Contains(k, prefix) {
			p.Images[k] = newImageStr
		}
	}
	return nil
}
// WriteToFile serializes the (possibly modified) presentation to a new
// .pptx file at path. Unlike before, the error from closing the target
// file is no longer discarded.
func (p *PowerPoint) WriteToFile(path string) error {
	target, err := os.Create(path)
	if err != nil {
		return err
	}
	werr := p.Write(target)
	cerr := target.Close()
	if werr != nil {
		return werr
	}
	return cerr
}

// Write serializes the presentation as a zip archive to ioWriter.
// Modified parts (slides, notes slides, themes, presentation.xml,
// images) are written from the in-memory copies; every other entry is
// copied verbatim from the source archive.
//
// Fixes over the previous version: write errors and the zip writer's
// Close error (which emits the central directory) are now reported
// instead of discarded, and a failed file.Open no longer causes a
// nil-reader panic.
func (p *PowerPoint) Write(ioWriter io.Writer) (err error) {
	w := zip.NewWriter(ioWriter)
	defer func() {
		if cerr := w.Close(); err == nil {
			err = cerr
		}
	}()
	for _, file := range p.Files {
		writer, cerr := w.Create(file.Name)
		if cerr != nil {
			return cerr
		}
		var content []byte
		switch {
		case strings.Contains(file.Name, "ppt/slides/slide") && p.Slides[file.Name] != "":
			content = []byte(p.Slides[file.Name])
		case strings.Contains(file.Name, "ppt/notesSlides/notesSlide") && p.NotesSlides[file.Name] != "":
			content = []byte(p.NotesSlides[file.Name])
		case strings.Contains(file.Name, "ppt/theme/theme") && p.Themes[file.Name] != "":
			content = []byte(p.Themes[file.Name])
		case file.Name == "ppt/presentation.xml":
			content = []byte(p.Presentation)
		case strings.Contains(file.Name, "ppt/media/image") && p.Images[file.Name] != "":
			content = []byte(p.Images[file.Name])
		default:
			// Untouched part: copy it straight out of the source archive.
			rc, oerr := file.Open()
			if oerr != nil {
				return oerr
			}
			content, oerr = io.ReadAll(rc)
			rc.Close()
			if oerr != nil {
				return oerr
			}
		}
		if _, werr := writer.Write(content); werr != nil {
			return werr
		}
	}
	return nil
}
func readCloserToByte(stream io.Reader) []byte {
buf := new(bytes.Buffer)
buf.ReadFrom(stream)
return buf.Bytes()
}

164
godo/office/reader.go

@ -0,0 +1,164 @@
package office
import (
"encoding/binary"
"errors"
"io"
"slices"
)
// headerSize is the fixed size in bytes of a record header: bytes 2-3
// hold the record type and bytes 4-7 the payload length (see record.Type
// and record.Length); the first two bytes are flag/version data not read
// here.
const headerSize = 8

// recordType is an enumeration that specifies the record type of an atom record or a container record
// ([MS-PPT] 2.13.24 RecordType)
type recordType uint16

// Record type values used while walking a PowerPoint binary stream
// (subset of [MS-PPT] 2.13.24). recordTypeUnspecified is a sentinel
// meaning "accept any type" when passed to readRecord /
// readRecordHeaderOnly.
const (
	recordTypeUnspecified              recordType = 0
	recordTypeDocument                 recordType = 0x03E8
	recordTypeSlide                    recordType = 0x03EE
	recordTypeEnvironment              recordType = 0x03F2
	recordTypeSlidePersistAtom         recordType = 0x03F3
	recordTypeSlideShowSlideInfoAtom   recordType = 0x03F9
	recordTypeExternalObjectList       recordType = 0x0409
	recordTypeDrawingGroup             recordType = 0x040B
	recordTypeDrawing                  recordType = 0x040C
	recordTypeList                     recordType = 0x07D0
	recordTypeSoundCollection          recordType = 0x07E4
	recordTypeTextCharsAtom            recordType = 0x0FA0
	recordTypeTextBytesAtom            recordType = 0x0FA8
	recordTypeHeadersFooters           recordType = 0x0FD9
	recordTypeSlideListWithText        recordType = 0x0FF0
	recordTypeUserEditAtom             recordType = 0x0FF5
	recordTypePersistDirectoryAtom     recordType = 0x1772
	recordTypeRoundTripSlideSyncInfo12 recordType = 0x3714
)
type readerAtAdapter struct {
r io.Reader
readBytes []byte
}
func ToReaderAt(r io.Reader) io.ReaderAt {
ra, ok := r.(io.ReaderAt)
if ok {
return ra
}
return &readerAtAdapter{
r: r,
}
}
// ReadAt satisfies io.ReaderAt by lazily pulling bytes from the wrapped
// reader into the internal buffer until the requested range is covered,
// then serving the request from that buffer.
func (r *readerAtAdapter) ReadAt(p []byte, off int64) (n int, err error) {
	needed := int(off) + len(p)
	if needed > len(r.readBytes) {
		if expandErr := r.expandBuffer(needed); expandErr != nil {
			return 0, expandErr
		}
	}
	return bytesReaderAt(r.readBytes).ReadAt(p, off)
}
// expandBuffer grows the internal buffer to newSize bytes, filling the new
// tail from the underlying reader. On EOF the buffer keeps whatever was
// actually read; any other read error is returned.
func (r *readerAtAdapter) expandBuffer(newSize int) error {
	if cap(r.readBytes) < newSize {
		// slices.Grow guarantees capacity for n elements beyond len(s),
		// so the delta must be relative to len, not cap. The original
		// passed newSize-cap, which could leave cap < newSize and make
		// the re-slice below panic.
		r.readBytes = slices.Grow(r.readBytes, newSize-len(r.readBytes))
	}
	newPart := r.readBytes[len(r.readBytes):newSize]
	// io.ReadFull keeps reading until newPart is full; a single Read may
	// legally return fewer bytes without any error, which previously left
	// the buffer short and caused spurious EOFs in ReadAt.
	n, err := io.ReadFull(r.r, newPart)
	switch {
	case err == nil:
		r.readBytes = r.readBytes[:newSize]
	case errors.Is(err, io.EOF), errors.Is(err, io.ErrUnexpectedEOF):
		r.readBytes = r.readBytes[:len(r.readBytes)+n]
	default:
		return err
	}
	return nil
}
func BytesReadAt(src []byte, dst []byte, off int64) (n int, err error) {
return bytesReaderAt(src).ReadAt(dst, off)
}
type bytesReaderAt []byte
func (bra bytesReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
idx := 0
for i := int(off); i < len(bra) && idx < len(p); i, idx = i+1, idx+1 {
p[idx] = bra[i]
}
if idx != len(p) {
return idx, io.EOF
}
return idx, nil
}
// LowerPart returns the low-order byte of the record type value.
func (r recordType) LowerPart() byte {
	// Converting to byte keeps exactly the low 8 bits, equivalent to
	// masking with 0xFF first.
	return byte(r)
}
// errMismatchRecordType is returned when a record's type does not match
// the wantedType requested by the caller.
var errMismatchRecordType = errors.New("mismatch record type")

// record is a parsed atom/container record: the raw 8-byte header plus,
// when loaded via readRecord, the record payload.
type record struct {
	header [headerSize]byte
	recordData
}

// Type returns recordType of record contained in its header
// (bytes 2-3, little-endian).
func (r record) Type() recordType {
	return recordType(binary.LittleEndian.Uint16(r.header[2:4]))
}

// Length returns data length contained in record header
// (bytes 4-7, little-endian).
func (r record) Length() uint32 {
	return binary.LittleEndian.Uint32(r.header[4:8])
}

// Data returns all data from record except header.
func (r record) Data() []byte {
	return r.recordData
}
// recordData is the payload of a record (everything after the 8-byte header).
type recordData []byte

// ReadAt copies bytes from record data at given offset into buffer p,
// returning io.EOF when the payload ends before p is filled.
func (rd recordData) ReadAt(p []byte, off int64) (n int, err error) {
	return BytesReadAt(rd, p, off)
}

// LongAt interprets 4 bytes of record data at given offset as uint32 value
// and returns it (little-endian).
// NOTE(review): panics if fewer than 4 bytes remain at offset — callers
// appear expected to validate bounds first.
func (rd recordData) LongAt(offset int) uint32 {
	return binary.LittleEndian.Uint32(rd[offset:])
}
// readRecord reads header and data of record. If wantedType is specified
// (not equal to recordTypeUnspecified), the read type is compared with the
// wanted one and errMismatchRecordType is returned on mismatch.
func readRecord(f io.ReaderAt, offset int64, wantedType recordType) (record, error) {
	rec, err := readRecordHeaderOnly(f, offset, wantedType)
	if err != nil {
		return record{}, err
	}
	payload := make([]byte, rec.Length())
	if _, err = f.ReadAt(payload, offset+headerSize); err != nil {
		return record{}, err
	}
	rec.recordData = payload
	return rec, nil
}
// readRecordHeaderOnly reads only the 8-byte header of the record at
// offset. If wantedType is specified (not equal to recordTypeUnspecified),
// the read type is compared with the wanted one and errMismatchRecordType
// is returned on mismatch.
func readRecordHeaderOnly(f io.ReaderAt, offset int64, wantedType recordType) (record, error) {
	var r record
	if _, err := f.ReadAt(r.header[:], offset); err != nil {
		return record{}, err
	}
	if wantedType != recordTypeUnspecified && r.Type() != wantedType {
		return record{}, errMismatchRecordType
	}
	return r, nil
}

362
godo/office/rtf.go

@ -0,0 +1,362 @@
// Package rtftxt extracts text from .rtf documents
package office
import (
"bytes"
"fmt"
"io"
"os"
"strconv"
"strings"
"time"
"github.com/EndFirstCorp/peekingReader"
)
// rtf2txt reads the .rtf document at filename and returns its plain text.
// (The previous comment named this "ToStr", which does not exist here.)
func rtf2txt(filename string) (string, error) {
	content, err := os.ReadFile(filename)
	if err != nil {
		return "", err
	}
	return BytesToStr(content)
}
// BytesToStr converts a []byte representation of a .rtf document file to
// its plain-text string.
func BytesToStr(data []byte) (string, error) {
	buf, err := Text(bytes.NewReader(data))
	if err != nil {
		return "", err
	}
	return buf.String(), nil
}
// stack is a simple LIFO of strings backed by a singly linked list.
type stack struct {
	top  *element
	size int
}

// element is one linked-list node of the stack.
type element struct {
	value string
	next  *element
}

// Len reports the number of values currently on the stack.
func (s *stack) Len() int { return s.size }

// Push places value on top of the stack.
func (s *stack) Push(value string) {
	s.top = &element{value: value, next: s.top}
	s.size++
}

// Peek returns the top value without removing it, or "" when empty.
func (s *stack) Peek() string {
	if s.size == 0 {
		return ""
	}
	return s.top.value
}

// Pop removes and returns the top value, or "" when empty.
func (s *stack) Pop() string {
	if s.size == 0 {
		return ""
	}
	v := s.top.value
	s.top = s.top.next
	s.size--
	return v
}
// Text is used to convert an io.Reader containing RTF data into
// plain text.
//
// Group braces '{'/'}' and raw newlines are dropped; a '\' hands control
// off to readControl; every other byte is copied verbatim. symbolStack
// records the control words seen so far for readControl's \* handling.
// NOTE(review): the loop stops on ANY read error — including real I/O
// failures, not just EOF — and still returns the text gathered so far
// with a nil error.
func Text(r io.Reader) (*bytes.Buffer, error) {
	pr := peekingReader.NewBufReader(r)
	var text bytes.Buffer
	var symbolStack stack
	for b, err := pr.ReadByte(); err == nil; b, err = pr.ReadByte() {
		switch b {
		case '\\':
			err := readControl(pr, &symbolStack, &text)
			if err != nil {
				return nil, err
			}
		case '{', '}':
		case '\n', '\r': // noop
		default:
			text.WriteByte(b)
		}
	}
	return &text, nil
}
// readControl consumes one control word/symbol that follows a '\' and
// writes its textual effect (if any) to text. s tracks control words seen
// so far so that a \* destination interrupting a previous control can
// finish that control's parameters.
func readControl(r peekingReader.Reader, s *stack, text *bytes.Buffer) error {
	control, num, err := tokenizeControl(r)
	if err != nil {
		return err
	}
	if control == "*" { // this is an extended control sequence
		err = readUntilClosingBrace(r)
		if err != nil {
			return err
		}
		if last := s.Peek(); last != "" {
			val, err := getParams(r) // last control was interrupted, so finish handling Params
			// NOTE(review): this passes "*" rather than the interrupted
			// control word from the stack — confirm this is intentional.
			handleParams(control, val, text)
			return err
		}
		return nil
	}
	// \'hh hex escape: emit the decoded character directly.
	if isUnicode, u := getUnicode(control); isUnicode {
		text.WriteString(u)
		return nil
	}
	if control == "" {
		p, err := r.Peek(1)
		if err != nil {
			return err
		}
		if p[0] == '\\' || p[0] == '{' || p[0] == '}' { // this is an escaped character
			text.WriteByte(p[0])
			r.ReadByte()
			return nil
		}
		// A bare '\' followed by anything else is treated as a line break.
		text.WriteByte('\n')
		return nil
	}
	if control == "binN" {
		// \binN introduces num bytes of raw binary data; skip them.
		return handleBinary(r, control, num)
	}
	if symbol, found := convertSymbol(control); found {
		text.WriteString(symbol)
	}
	val, err := getParams(r)
	if err != nil {
		return err
	}
	handleParams(control, val, text)
	s.Push(control)
	return nil
}
// tokenizeControl reads the control word that follows a '\'. It returns
// the canonicalized word (any numeric argument collapsed into a trailing
// "N"), the parsed number (-1 when absent), and any read error. A hex
// escape (\'hh) is returned verbatim — apostrophe included — for
// getUnicode to decode later.
func tokenizeControl(r peekingReader.Reader) (string, int, error) {
	var buf bytes.Buffer
	isHex := false
	// numStart is the index in buf where the numeric argument begins,
	// or -1 while no digit has been seen yet.
	numStart := -1
	for {
		p, err := r.Peek(1)
		if err != nil {
			return "", -1, err
		}
		b := p[0]
		switch {
		case b == '*' && buf.Len() == 0:
			r.ReadByte() // consume valid digit
			return "*", -1, nil
		case b == '\'' && buf.Len() == 0:
			isHex = true
			buf.WriteByte(b)
			r.ReadByte() // consume valid character
			// read 2 bytes for hex
			for i := 0; i < 2; i++ {
				b, err = r.ReadByte() // consume valid digit
				if err != nil {
					return "", -1, err
				}
				buf.WriteByte(b)
			}
			return buf.String(), -1, nil
		case b >= '0' && b <= '9' || b == '-':
			if numStart == -1 {
				numStart = buf.Len()
			} else if numStart == 0 {
				// digits arrived before any alpha characters
				return "", -1, fmt.Errorf("unexpected control sequence. Cannot begin with digit")
			}
			buf.WriteByte(b)
			r.ReadByte() // consume valid digit
		case b >= 'a' && b <= 'z' || b >= 'A' && b <= 'Z':
			if numStart > 0 { // we've already seen alpha character(s) plus digit(s)
				c, num := canonicalize(buf.String(), numStart)
				return c, num, nil
			}
			buf.WriteByte(b)
			r.ReadByte()
		default:
			// Delimiter reached (space, brace, '\\', ...): the control
			// word is complete; the delimiter itself is left unconsumed.
			if isHex {
				return buf.String(), -1, nil
			}
			c, num := canonicalize(buf.String(), numStart)
			return c, num, nil
		}
	}
}
// canonicalize collapses the numeric suffix of a control word into the
// literal "N" and returns the parsed number, e.g. ("fs24", 2) yields
// ("fsN", 24). When numStart is -1, past the end, or the tail is not a
// valid integer, the word is returned unchanged with -1.
func canonicalize(control string, numStart int) (string, int) {
	if numStart == -1 || numStart >= len(control) {
		return control, -1
	}
	value, convErr := strconv.Atoi(control[numStart:])
	if convErr != nil {
		return control, -1
	}
	return control[:numStart] + "N", value
}
func getUnicode(control string) (bool, string) {
if len(control) < 2 || control[0] != '\'' {
return false, ""
}
var buf bytes.Buffer
for i := 1; i < len(control); i++ {
b := control[i]
if b >= '0' && b <= '9' || b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F' {
buf.WriteByte(b)
} else {
break
}
}
after := control[buf.Len()+1:]
num, _ := strconv.ParseInt(buf.String(), 16, 16)
return true, fmt.Sprintf("%c%s", num, after)
}
// getParams gathers the free text that follows a control word, up to (but
// not including) the next delimiter: '\\', a group brace, a newline, or
// ';'. A terminating semicolon is consumed; any other delimiter is left
// for the caller.
func getParams(r peekingReader.Reader) (string, error) {
	data, err := peekingReader.ReadUntilAny(r, []byte{'\\', '{', '}', '\n', '\r', ';'})
	if err != nil {
		return "", err
	}
	p, err := r.Peek(1)
	if err != nil {
		return "", err
	}
	if p[0] == ';' { // skip next if it is a semicolon
		r.ReadByte()
	}
	return string(data), nil
}
// handleBinary consumes the size bytes of inline binary payload that
// follow a \binN control word so they are never emitted as text. Any
// other control word is a no-op.
func handleBinary(r peekingReader.Reader, control string, size int) error {
	if control != "binN" { // wrong control type
		return nil
	}
	_, err := r.ReadBytes(size)
	return err
}
// readUntilClosingBrace consumes input until the brace that closes the
// current group, tracking nested groups. It returns the read error if the
// input ends before the group is balanced.
func readUntilClosingBrace(r peekingReader.Reader) error {
	depth := 1
	for {
		b, err := r.ReadByte()
		if err != nil {
			return err
		}
		switch b {
		case '{':
			depth++
		case '}':
			depth--
			if depth == 0 {
				return nil
			}
		}
	}
}
// handleParams writes param to text when control is one of the formatting
// control words whose trailing parameter is actual document text
// (character, paragraph, section, table, special-character, TOC and tab
// controls). The single leading space that conventionally separates a
// control word from its parameter is stripped.
// NOTE(review): several case strings carry trailing spaces (e.g. "cchsN ",
// "sub ", "tcn "); canonicalized control words never contain spaces, so
// those entries look unreachable — confirm against tokenizeControl.
func handleParams(control, param string, text io.StringWriter) {
	param = strings.TrimPrefix(param, " ")
	if param == "" {
		return
	}
	switch control {
	case "fldrslt":
		text.WriteString(param)
	// Character Formatting Properties
	case "acccircle", "acccomma", "accdot", "accnone", "accunderdot",
		"animtextN", "b", "caps", "cbN", "cchsN ", "cfN", "charscalexN",
		"csN", "dnN", "embo", "expndN", "expndtwN ", "fittextN", "fN",
		"fsN", "i", "kerningN ", "langfeN", "langfenpN", "langN", "langnpN",
		"ltrch", "noproof", "nosupersub ", "outl", "plain", "rtlch", "scaps",
		"shad", "strike", "sub ", "super ", "ul", "ulcN", "uld", "uldash",
		"uldashd", "uldashdd", "uldb", "ulhwave", "ulldash", "ulnone", "ulth",
		"ulthd", "ulthdash", "ulthdashd", "ulthdashdd", "ulthldash", "ululdbwave", "ulw", "ulwave", "upN", "v", "webhidden":
		text.WriteString(param)
	// Paragraph Formatting Properties
	case "aspalpha", "aspnum", "collapsed", "contextualspace",
		"cufiN", "culiN", "curiN", "faauto", "facenter",
		"fafixed", "fahang", "faroman", "favar", "fiN", "hyphpar ",
		"indmirror", "intbl", "itapN", "keep", "keepn", "levelN", "liN",
		"linN", "lisaN", "lisbN", "ltrpar", "nocwrap", "noline", "nooverflow",
		"nosnaplinegrid", "nowidctlpar ", "nowwrap", "outlinelevelN ", "pagebb",
		"pard", "prauthN", "prdateN", "qc", "qd", "qj", "qkN", "ql", "qr", "qt",
		"riN", "rinN", "rtlpar", "saautoN", "saN", "sbautoN", "sbN", "sbys",
		"slmultN", "slN", "sN", "spv", "subdocumentN ", "tscbandhorzeven",
		"tscbandhorzodd", "tscbandverteven", "tscbandvertodd", "tscfirstcol",
		"tscfirstrow", "tsclastcol", "tsclastrow", "tscnecell", "tscnwcell",
		"tscsecell", "tscswcell", "txbxtwalways", "txbxtwfirst", "txbxtwfirstlast",
		"txbxtwlast", "txbxtwno", "widctlpar", "ytsN":
		text.WriteString(param)
	// Section Formatting Properties
	case "adjustright", "binfsxnN", "binsxnN", "colnoN ", "colsN", "colsrN ", "colsxN", "colwN ", "dsN", "endnhere", "footeryN", "guttersxnN", "headeryN", "horzsect", "linebetcol", "linecont", "linemodN", "lineppage", "linerestart", "linestartsN", "linexN", "lndscpsxn", "ltrsect", "margbsxnN", "marglsxnN", "margmirsxn", "margrsxnN", "margtsxnN", "pghsxnN", "pgnbidia", "pgnbidib", "pgnchosung", "pgncnum", "pgncont", "pgndbnum", "pgndbnumd", "pgndbnumk", "pgndbnumt", "pgndec", "pgndecd", "pgnganada", "pgngbnum", "pgngbnumd", "pgngbnumk", "pgngbnuml", "pgnhindia", "pgnhindib", "pgnhindic", "pgnhindid", "pgnhnN ", "pgnhnsc ", "pgnhnsh ", "pgnhnsm ", "pgnhnsn ", "pgnhnsp ", "pgnid", "pgnlcltr", "pgnlcrm", "pgnrestart", "pgnstartsN", "pgnthaia", "pgnthaib", "pgnthaic", "pgnucltr", "pgnucrm", "pgnvieta", "pgnxN", "pgnyN", "pgnzodiac", "pgnzodiacd", "pgnzodiacl", "pgwsxnN", "pnseclvlN", "rtlsect", "saftnnalc", "saftnnar", "saftnnauc", "saftnnchi", "saftnnchosung", "saftnncnum", "saftnndbar", "saftnndbnum", "saftnndbnumd", "saftnndbnumk", "saftnndbnumt", "saftnnganada", "saftnngbnum", "saftnngbnumd", "saftnngbnumk", "saftnngbnuml", "saftnnrlc", "saftnnruc", "saftnnzodiac", "saftnnzodiacd", "saftnnzodiacl", "saftnrestart", "saftnrstcont", "saftnstartN", "sbkcol", "sbkeven", "sbknone", "sbkodd", "sbkpage", "sectd", "sectdefaultcl", "sectexpandN", "sectlinegridN", "sectspecifycl", "sectspecifygenN", "sectspecifyl", "sectunlocked", "sftnbj", "sftnnalc", "sftnnar", "sftnnauc", "sftnnchi", "sftnnchosung", "sftnncnum", "sftnndbar", "sftnndbnum", "sftnndbnumd", "sftnndbnumk", "sftnndbnumt", "sftnnganada", "sftnngbnum", "sftnngbnumd", "sftnngbnumk", "sftnngbnuml", "sftnnrlc", "sftnnruc", "sftnnzodiac", "sftnnzodiacd", "sftnnzodiacl", "sftnrestart", "sftnrstcont", "sftnrstpg", "sftnstartN", "sftntj", "srauthN", "srdateN", "titlepg", "vertal", "vertalb", "vertalc", "vertalj", "vertalt", "vertsect":
		text.WriteString(param)
	// Section Text
	case "stextflowN":
		text.WriteString(param)
	// Special Characters
	case "-", ":", "_", "{", "|", "}", "~", "bullet", "chatn", "chdate", "chdpa", "chdpl", "chftn", "chftnsep", "chftnsepc", "chpgn", "chtime", "column", "emdash", "emspace ",
		"endash", "enspace ", "lbrN", "ldblquote", "line", "lquote", "ltrmark", "page", "par", "qmspace", "rdblquote", "row", "rquote", "rtlmark", "sect", "sectnum", "softcol ", "softlheightN ", "softline ", "softpage ", "tab", "zwbo", "zwj", "zwnbo", "zwnj":
		text.WriteString(param)
	// Table Definitions
	case "cell", "cellxN", "clbgbdiag", "clbgcross", "clbgdcross", "clbgdkbdiag", "clbgdkcross", "clbgdkdcross", "clbgdkfdiag", "clbgdkhor", "clbgdkvert", "clbgfdiag", "clbghoriz", "clbgvert", "clbrdrb", "clbrdrl", "clbrdrr", "clbrdrt", "clcbpatN", "clcbpatrawN", "clcfpatN", "clcfpatrawN", "cldel2007", "cldelauthN", "cldeldttmN", "cldgll", "cldglu", "clFitText", "clftsWidthN", "clhidemark", "clins", "clinsauthN", "clinsdttmN", "clmgf", "clmrg", "clmrgd", "clmrgdauthN", "clmrgddttmN", "clmrgdr", "clNoWrap", "clpadbN", "clpadfbN", "clpadflN", "clpadfrN", "clpadftN", "clpadlN", "clpadrN", "clpadtN", "clshdngN", "clshdngrawN", "clshdrawnil", "clspbN", "clspfbN", "clspflN", "clspfrN", "clspftN", "clsplit", "clsplitr", "clsplN", "clsprN", "clsptN", "cltxbtlr", "cltxlrtb", "cltxlrtbv", "cltxtbrl", "cltxtbrlv", "clvertalb", "clvertalc", "clvertalt", "clvmgf", "clvmrg", "clwWidthN", "irowbandN", "irowN", "lastrow", "ltrrow", "nestcell", "nestrow", "nesttableprops", "nonesttables", "rawclbgbdiag", "rawclbgcross", "rawclbgdcross", "rawclbgdkbdiag", "rawclbgdkcross", "rawclbgdkdcross", "rawclbgdkfdiag", "rawclbgdkhor", "rawclbgdkvert", "rawclbgfdiag", "rawclbghoriz", "rawclbgvert", "rtlrow", "tabsnoovrlp", "taprtl", "tblindN", "tblindtypeN", "tbllkbestfit", "tbllkborder", "tbllkcolor", "tbllkfont", "tbllkhdrcols", "tbllkhdrrows", "tbllklastcol", "tbllklastrow", "tbllknocolband", "tbllknorowband", "tbllkshading", "tcelld", "tdfrmtxtBottomN", "tdfrmtxtLeftN", "tdfrmtxtRightN", "tdfrmtxtTopN", "tphcol", "tphmrg", "tphpg", "tposnegxN", "tposnegyN", "tposxc", "tposxi", "tposxl", "tposxN", "tposxo", "tposxr", "tposyb", "tposyc", "tposyil", "tposyin", "tposyN", "tposyout", "tposyt", "tpvmrg", "tpvpara", "tpvpg", "trauthN", "trautofitN", "trbgbdiag", "trbgcross", "trbgdcross", "trbgdkbdiag", "trbgdkcross", "trbgdkdcross", "trbgdkfdiag", "trbgdkhor", "trbgdkvert", "trbgfdiag", "trbghoriz", "trbgvert", "trbrdrb ", "trbrdrh ", "trbrdrl ", "trbrdrr ", "trbrdrt ", "trbrdrv ", "trcbpatN",
		"trcfpatN", "trdateN", "trftsWidthAN", "trftsWidthBN", "trftsWidthN", "trgaphN", "trhdr ", "trkeep ", "trkeepfollow", "trleftN", "trowd", "trpaddbN", "trpaddfbN", "trpaddflN", "trpaddfrN", "trpaddftN", "trpaddlN", "trpaddrN", "trpaddtN", "trpadobN", "trpadofbN", "trpadoflN", "trpadofrN", "trpadoftN", "trpadolN", "trpadorN", "trpadotN", "trpatN", "trqc", "trql", "trqr", "trrhN", "trshdngN", "trspdbN", "trspdfbN", "trspdflN", "trspdfrN", "trspdftN", "trspdlN", "trspdrN", "trspdtN", "trspobN", "trspofbN", "trspoflN", "trspofrN", "trspoftN", "trspolN", "trsporN", "trspotN", "trwWidthAN", "trwWidthBN", "trwWidthN":
		text.WriteString(param)
	// Table of Contents Entries
	case "tc", "tcfN", "tclN", "tcn ":
		text.WriteString(param)
	// Tabs
	case "tbN", "tldot", "tleq", "tlhyph", "tlmdot", "tlth", "tlul", "tqc", "tqdec", "tqr", "txN":
		text.WriteString(param)
	}
}
func convertSymbol(symbol string) (string, bool) {
switch symbol {
case "bullet":
return "*", true
case "chdate", "chdpa", "chdpl":
return time.Now().Format("2005-01-02"), true
case "chtime":
return time.Now().Format("4:56 pm"), true
case "emdash", "endash":
return "-", true
case "lquote", "rquote":
return "'", true
case "ldblquote", "rdblquote":
return "\"", true
case "line", "lbrN":
return "\n", true
case "cell", "column", "emspace", "enspace", "qmspace", "nestcell", "nestrow", "page", "par", "row", "sect", "tab":
return " ", true
case "|", "~", "-", "_", ":":
return symbol, true
case "chatn", "chftn", "chftnsep", "chftnsepc", "chpgn", "sectnum", "ltrmark", "rtlmark", "zwbo", "zwj", "zwnbo", "zwnj", "softcol",
"softline", "softpage":
return "", true
default:
return "", false
}
}

41
godo/office/txt.go

@ -0,0 +1,41 @@
package office
import (
"bufio"
"os"
"regexp"
"strings"
)
func text2txt(filename string) (string, error) {
file, err := os.Open(filename)
if err != nil {
return "", err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
// 移除行内的换行符
line = strings.ReplaceAll(line, "\r", "")
lines = append(lines, line)
}
if err := scanner.Err(); err != nil {
return "", err
}
// 合并所有行
content := strings.Join(lines, " ")
// 移除多余的空格
re := regexp.MustCompile(`\s+`)
content = re.ReplaceAllString(content, " ")
// 移除开头和结尾的空格
content = strings.TrimSpace(content)
return content, nil
}

46
godo/office/types.go

@ -0,0 +1,46 @@
package office
import (
"regexp"
"time"
)
// maxBytes caps how much document data is processed (1 GiB).
const maxBytes = 1024 << 20 // 1GB

// ISO is the layout for OOXML-style timestamps (e.g. the created/modified
// core properties); it is a valid Go reference-time layout.
const ISO string = "2006-01-02T15:04:05"

// TAG_RE matches one or more consecutive XML/HTML tags, for stripping markup.
var TAG_RE = regexp.MustCompile(`(<[^>]*>)+`)

// PARA_RE matches runs of closing paragraph tags such as </w:p> or </a:p>.
var PARA_RE = regexp.MustCompile(`(</[a-z]:p>)+`)

// DEBUG is a package-wide debug flag (its consumers are not visible in
// this file).
var DEBUG bool = false
// Document is the extraction result for one office file: filesystem
// metadata, the document's core properties, and its extracted text.
type Document struct {
	path           string    // source path on disk (unexported, so never serialized)
	RePath         string    `json:"path"`     // path as reported to API consumers
	Filename       string    `json:"filename"` // base name of the file
	Title          string    `json:"title"`
	Subject        string    `json:"subject"`
	Creator        string    `json:"creator"`
	Keywords       string    `json:"keywords"`
	Description    string    `json:"description"`
	Lastmodifiedby string    `json:"lastModifiedBy"`
	Revision       string    `json:"revision"`
	Category       string    `json:"category"`
	Content        string    `json:"content"` // extracted plain text
	Modifytime     time.Time `json:"modified"`
	Createtime     time.Time `json:"created"`
	Accesstime     time.Time `json:"accessed"`
	Size           int       `json:"size"` // file size in bytes
}
// DocReader converts the file at the given path into plain text.
type DocReader func(string) (string, error)

// XMLContent mirrors the core-properties XML of an office package
// (presumably docProps/core.xml — confirm at the unmarshal site);
// Created/Modified hold the raw timestamp strings before parsing.
type XMLContent struct {
	Title          string `xml:"title"`
	Subject        string `xml:"subject"`
	Creator        string `xml:"creator"`
	Keywords       string `xml:"keywords"`
	Description    string `xml:"description"`
	LastModifiedBy string `xml:"lastModifiedBy"`
	Revision       string `xml:"revision"`
	Created        string `xml:"created"`
	Modified       string `xml:"modified"`
	Category       string `xml:"category"`
}

27
godo/office/windows.go

@ -0,0 +1,27 @@
//go:build windows
// +build windows
package office
import (
	"errors"
	"os"
	"syscall"
	"time"
)
// getFileInfoData fills data with filesystem metadata for data.path: base
// name, size, and the Windows creation/last-write/last-access timestamps.
// It returns false with an error when the file cannot be stat'ed.
func getFileInfoData(data *Document) (bool, error) {
	fileinfo, err := os.Stat(data.path)
	if err != nil {
		return false, err
	}
	data.Filename = fileinfo.Name()
	data.Title = data.Filename
	data.Size = int(fileinfo.Size())
	// Guard the type assertion: the unchecked form panicked if Sys()
	// ever returned anything other than *syscall.Win32FileAttributeData.
	stat, ok := fileinfo.Sys().(*syscall.Win32FileAttributeData)
	if !ok {
		return false, errors.New("unexpected FileInfo.Sys type")
	}
	data.Createtime = time.Unix(0, stat.CreationTime.Nanoseconds())
	data.Modifytime = time.Unix(0, stat.LastWriteTime.Nanoseconds())
	data.Accesstime = time.Unix(0, stat.LastAccessTime.Nanoseconds())
	return true, nil
}

125
godo/office/xls.go

@ -0,0 +1,125 @@
package office
import (
"bytes"
"fmt"
"io"
"strings"
"godo/office/xls"
)
// XLS2Text extracts the text of every sheet of a legacy .xls workbook as
// one string: each sheet gets a title line, then one line per row with
// non-empty cells joined by ", ".
func XLS2Text(reader io.ReadSeeker) (string, error) {
	xlFile, err := xls.OpenReader(reader, "utf-8")
	if err != nil || xlFile == nil {
		return "", err
	}
	// strings.Builder replaces the original fmt.Sprintf pattern, which
	// re-copied the entire accumulated text on every row (O(n^2)).
	var sb strings.Builder
	for n := 0; n < xlFile.NumSheets(); n++ {
		sheet := xlFile.GetSheet(n)
		if sheet == nil {
			continue
		}
		if sb.Len() > 0 {
			sb.WriteByte('\n')
		}
		sb.WriteString(xlGenerateSheetTitle(sheet.Name, n, int(sheet.MaxRow)))
		for m := 0; m <= int(sheet.MaxRow); m++ {
			row := sheet.Row(m)
			if row == nil {
				continue
			}
			rowText := ""
			// go through all columns
			for c := row.FirstCol(); c < row.LastCol(); c++ {
				if text := row.Col(c); text != "" {
					text = cleanCell(text)
					if c > row.FirstCol() {
						rowText += ", "
					}
					rowText += text
				}
			}
			// The title line is always written first, so every row —
			// including empty ones — is preceded by a newline, matching
			// the original output exactly.
			if sb.Len() > 0 {
				sb.WriteByte('\n')
			}
			sb.WriteString(rowText)
		}
	}
	return sb.String(), nil
}
// cleanCell flattens a cell's text onto one trimmed line: newlines become
// spaces, carriage returns are dropped.
func cleanCell(text string) string {
	flat := strings.NewReplacer("\n", " ", "\r", "").Replace(text)
	return strings.TrimSpace(flat)
}
// xlGenerateSheetTitle renders the heading emitted before a sheet's rows;
// every sheet after the first is preceded by a blank line.
func xlGenerateSheetTitle(name string, number, rows int) string {
	prefix := ""
	if number > 0 {
		prefix = "\n"
	}
	return prefix + fmt.Sprintf("Sheet \"%s\" (%d rows):\n", name, rows)
}
// func writeOutput(writer io.Writer, output []byte, alreadyWritten *int64, size *int64) (err error) {
// if int64(len(output)) > *size {
// output = output[:*size]
// }
// *size -= int64(len(output))
// writtenOut, err := writer.Write(output)
// *alreadyWritten += int64(writtenOut)
// return err
// }
// IsFileXLS reports whether data begins with the OLE2 compound-file
// signature D0 CF 11 E0 A1 B1 1A E1 used by legacy .xls workbooks.
func IsFileXLS(data []byte) bool {
	signature := []byte{0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1}
	return len(data) >= len(signature) && bytes.Equal(data[:len(signature)], signature)
}
// XLS2Cells extracts every non-empty cell of an .xls workbook, cleaned of
// line breaks, in sheet/row/column order.
func XLS2Cells(reader io.ReadSeeker) (cells []string, err error) {
	workbook, err := xls.OpenReader(reader, "utf-8")
	if err != nil || workbook == nil {
		return nil, err
	}
	for s := 0; s < workbook.NumSheets(); s++ {
		sheet := workbook.GetSheet(s)
		if sheet == nil {
			continue
		}
		for r := 0; r <= int(sheet.MaxRow); r++ {
			row := sheet.Row(r)
			if row == nil {
				continue
			}
			for c := row.FirstCol(); c < row.LastCol(); c++ {
				if text := row.Col(c); text != "" {
					cells = append(cells, cleanCell(text))
				}
			}
		}
	}
	return cells, nil
}

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save