diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..5f42ed0
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+# Contributing
+We are grateful for all improvements, bug fixes and new features, as long as they do not break existing behaviour.
+
+## Code style
+Code style is described in the StyleCop ruleset file; checks are performed at compile time. Any rule violation breaks the project build.
+Key points:
+* use tabs
+* private fields named with prefix "_"
+
+## PR
+A PR requires:
+* An issue describing what problem you solve
+* Code that passes the Travis build (starts automatically)
+* Code is covered by tests
diff --git a/README.en.md b/README.en.md
index f032e05..66d39f9 100644
--- a/README.en.md
+++ b/README.en.md
@@ -187,4 +187,16 @@ wails build
- [pptist](https://github.com/pipipi-pikachu/PPTist)
- [vditor](https://github.com/Vanessa219/vditor)
- [mind-map](https://github.com/wanglin2/mind-map)
-- [canvas-editor](https://github.com/Hufe921/canvas-editor)
\ No newline at end of file
+- [canvas-editor](https://github.com/Hufe921/canvas-editor)
+
+### Contributing
+
+See [Contributing guide](CONTRIBUTING.md)
+
+### Contributors
+
+
+
+
+
+Made with [contributors-img](https://contributors-img.web.app).
\ No newline at end of file
diff --git a/README.md b/README.md
index 51ac4d0..19e693a 100644
--- a/README.md
+++ b/README.md
@@ -28,6 +28,7 @@
- 更改文档存储方式,支持选择文件夹
- 内网聊天新增ai对话,可保存对话历史,可更换模型和prompt
- 新增可定义端口和访问路径,支持web端系统重启
+- 新增企业端gitee登录和github登录
## 🏭 第三阶段目标(十二月底发布)
1. **文档处理与Markdown智能升级**:
diff --git a/cloud/README.md b/cloud/README.md
index 42d0208..d2e751d 100644
--- a/cloud/README.md
+++ b/cloud/README.md
@@ -1,4 +1,6 @@
-## 安装帮助
+## godocloud客户端安装帮助
+
+- cloud目录为企业版打包客户端而设定,可以自由定制客户端,根据以下步骤打包
### 第一步:安装nodejs
@@ -19,7 +21,7 @@ sudo chmod +x build.sh
```
#### windows环境下打包
-- 首先安装mingw-w64,进入命令行界面
+- 首先安装mingw-w64,进入命令行界面
```
./build.sh
@@ -27,3 +29,18 @@ sudo chmod +x build.sh
- 打包成功后每个系统的版本在dist目录下
+### 第三步:部署
+
+- 启动文件
+```
+sudo chmod +x ./godocloud_linux_amd64
+./godocloud_linux_amd64
+```
+- 访问地址:http://localhost:56781
+
+- 备注:如需更改端口地址,修改cloud/cmd/main.go中的以下常量,改完之后要重新打包部署
+
+```
+const serverAddress = ":56781"
+```
+
diff --git a/frontend/src/components/chat/Chat.vue b/frontend/src/components/chat/Chat.vue
index 329cea9..f2a1f2e 100644
--- a/frontend/src/components/chat/Chat.vue
+++ b/frontend/src/components/chat/Chat.vue
@@ -1,46 +1,46 @@
@@ -48,103 +48,62 @@
-
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
-
+
-
+
-
+
-
+
-
+
{{ option.label }}
@@ -152,167 +111,169 @@
diff --git a/frontend/src/components/chat/ChatMenu.vue b/frontend/src/components/chat/ChatMenu.vue
index b499323..ec3fe20 100644
--- a/frontend/src/components/chat/ChatMenu.vue
+++ b/frontend/src/components/chat/ChatMenu.vue
@@ -1,34 +1,23 @@
-
+
@@ -36,21 +25,24 @@
-
+
+
+
-
+
diff --git a/frontend/src/stores/labels/chatglm.ts b/frontend/src/stores/labels/chatglm.ts
new file mode 100644
index 0000000..9d7d2dc
--- /dev/null
+++ b/frontend/src/stores/labels/chatglm.ts
@@ -0,0 +1,32 @@
+export const chatglmLabels = {
+ name: "chatglm",
+ family: "llama",
+ action: "chat",
+ models: [
+ {
+ model: "EntropyYue/chatglm3:6b",
+ params: {
+ stream: true,
+ "stop": [
+ "<|system|>",
+ "<|user|>",
+ "<|assistant|>"
+ ],
+ "temperature": 0.7,
+ "top_k": 5,
+ "top_p": 0.8
+ },
+ info: {
+ engine: "ollama",
+ from: "ollama",
+ size: "3.6GB",
+ desk: "4GB",
+ cpu: "16GB",
+ gpu: "8GB",
+ quant: "q4"
+ }
+ },
+ ],
+ zhdesc: "ChatGLM是由清华技术成果转化的公司智谱AI发布的开源的、支持中英双语问答的对话语言模型系列,并针对中文进行了优化,该模型基于General Language Model(GLM)架构构建",
+ endesc: "ChatGLM is an open-source dialogue language model series released by Zhipu AI, a company that transforms technology achievements from Tsinghua University. It supports bilingual Q&A in both Chinese and English and has been optimized for Chinese. The model is built on the General Language Model (GLM) architecture"
+}
\ No newline at end of file
diff --git a/frontend/src/stores/labels/index.ts b/frontend/src/stores/labels/index.ts
index 1a3e1ad..3f35381 100644
--- a/frontend/src/stores/labels/index.ts
+++ b/frontend/src/stores/labels/index.ts
@@ -1,5 +1,6 @@
import { qwenLabels } from './qwen.ts'
import { gemmaLabels } from './gemma.ts'
+import { chatglmLabels } from './chatglm.ts'
import { llamaLabels } from './llama.ts'
import { internlmLabels } from './internlm.ts'
import { mingyiLabels } from './mingyi.ts'
@@ -8,6 +9,8 @@ import { llavaLabels } from './llava.ts'
import { bakllavaLabels } from './bakllava.ts'
import { minicpmLabels } from './minicpm.ts'
import { moondreamLabels } from './moondream.ts'
+import { starlinglmLabels } from './starlinglm.ts'
+import { neuralchatLabels } from './neuralchat.ts'
import { phiLabels } from './phi.ts'
import { openchatLabels } from './openchat.ts'
import { ayaLabels } from './aya.ts'
@@ -41,6 +44,7 @@ import { vitsLabel } from './vits.ts'
export const aiLabels = [
qwenLabels,
gemmaLabels,
+ chatglmLabels,
llamaLabels,
internlmLabels,
mingyiLabels,
@@ -49,6 +53,8 @@ export const aiLabels = [
bakllavaLabels,
minicpmLabels,
moondreamLabels,
+ starlinglmLabels,
+ neuralchatLabels,
phiLabels,
openchatLabels,
ayaLabels,
diff --git a/frontend/src/stores/labels/neuralchat.ts b/frontend/src/stores/labels/neuralchat.ts
new file mode 100644
index 0000000..3e700e4
--- /dev/null
+++ b/frontend/src/stores/labels/neuralchat.ts
@@ -0,0 +1,33 @@
+export const neuralchatLabels = {
+ name: "neural-chat",
+ family: "llama",
+ action: "chat",
+ models: [
+ {
+ model: "neural-chat:latest",
+ params: {
+ stream: true,
+ "num_ctx": 4096,
+ "stop": [
+ "",
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "temperature": 0.7,
+ "top_k": 5,
+ "top_p": 0.8
+ },
+ info: {
+ engine: "ollama",
+ from: "ollama",
+ size: "4.1GB",
+ desk: "5GB",
+ cpu: "16GB",
+ gpu: "8GB",
+ quant: "q4"
+ }
+ },
+ ],
+ zhdesc: "基于Mistral的微调模型,具有良好的领域和语言覆盖率。",
+ endesc: "A fine-tuned model based on Mistral with good coverage of domain and language. "
+}
\ No newline at end of file
diff --git a/frontend/src/stores/labels/starlinglm.ts b/frontend/src/stores/labels/starlinglm.ts
new file mode 100644
index 0000000..d0b43db
--- /dev/null
+++ b/frontend/src/stores/labels/starlinglm.ts
@@ -0,0 +1,33 @@
+export const starlinglmLabels = {
+ name: "starling-lm",
+ family: "llama",
+ action: "chat",
+ models: [
+ {
+ model: "starling-lm:latest",
+ params: {
+ stream: true,
+ "stop": [
+ "<|endoftext|>",
+ "<|end_of_turn|>",
+ "Human:",
+ "Assistant:"
+ ],
+ "temperature": 0.7,
+ "top_k": 5,
+ "top_p": 0.8
+ },
+ info: {
+ engine: "ollama",
+ from: "ollama",
+ size: "4.1GB",
+ desk: "5GB",
+ cpu: "16GB",
+ gpu: "8GB",
+ quant: "q4"
+ }
+ },
+ ],
+ zhdesc: "Starling是一个大型语言模型,通过人工智能反馈的强化学习进行训练,专注于提高聊天机器人的有用性。",
+ endesc: "Starling is a large language model trained by reinforcement learning from AI feedback focused on improving chatbot helpfulness. "
+}
\ No newline at end of file
diff --git a/frontend/src/system/member.ts b/frontend/src/system/member.ts
index 9d412df..49641e5 100644
--- a/frontend/src/system/member.ts
+++ b/frontend/src/system/member.ts
@@ -5,7 +5,7 @@ export const memberList = [
content:"Chat",
frame: true,
width: 800,
- height: 650,
+ height: 600,
center: true,
resizable: true,
isDeskTop: true,