From cb7cca57b22e6ffc29e64c1bb674628698d23a20 Mon Sep 17 00:00:00 2001
From: godo
Date: Thu, 5 Dec 2024 18:21:23 +0800
Subject: [PATCH] add new models

---
 frontend/src/stores/labels/chatglm.ts    | 32 +++++++++++++++++++++++
 frontend/src/stores/labels/index.ts      |  6 +++++
 frontend/src/stores/labels/neuralchat.ts | 33 ++++++++++++++++++++++++
 frontend/src/stores/labels/starlinglm.ts | 33 ++++++++++++++++++++++++
 4 files changed, 104 insertions(+)
 create mode 100644 frontend/src/stores/labels/chatglm.ts
 create mode 100644 frontend/src/stores/labels/neuralchat.ts
 create mode 100644 frontend/src/stores/labels/starlinglm.ts

diff --git a/frontend/src/stores/labels/chatglm.ts b/frontend/src/stores/labels/chatglm.ts
new file mode 100644
index 0000000..9d7d2dc
--- /dev/null
+++ b/frontend/src/stores/labels/chatglm.ts
@@ -0,0 +1,32 @@
+export const chatglmLabels = {
+    name: "chatglm",
+    family: "llama",
+    action: "chat",
+    models: [
+        {
+            model: "EntropyYue/chatglm3:6b",
+            params: {
+                stream: true,
+                "stop": [
+                    "<|system|>",
+                    "<|user|>",
+                    "<|assistant|>"
+                ],
+                "temperature": 0.7,
+                "top_k": 5,
+                "top_p": 0.8
+            },
+            info: {
+                engine: "ollama",
+                from: "ollama",
+                size: "3.6GB",
+                desk: "4GB",
+                cpu: "16GB",
+                gpu: "8GB",
+                quant: "q4"
+            }
+        },
+    ],
+    zhdesc: "ChatGLM是由清华技术成果转化的公司智谱AI发布的开源的、支持中英双语问答的对话语言模型系列,并针对中文进行了优化,该模型基于General Language Model(GLM)架构构建",
+    endesc: "ChatGLM is an open-source dialogue language model series released by Zhipu AI, a company that transforms technology achievements from Tsinghua University. It supports bilingual Q&A in both Chinese and English and has been optimized for Chinese. The model is built on the General Language Model (GLM) architecture"
+}
\ No newline at end of file
diff --git a/frontend/src/stores/labels/index.ts b/frontend/src/stores/labels/index.ts
index 1a3e1ad..3f35381 100644
--- a/frontend/src/stores/labels/index.ts
+++ b/frontend/src/stores/labels/index.ts
@@ -1,5 +1,6 @@
 import { qwenLabels } from './qwen.ts'
 import { gemmaLabels } from './gemma.ts'
+import { chatglmLabels } from './chatglm.ts'
 import { llamaLabels } from './llama.ts'
 import { internlmLabels } from './internlm.ts'
 import { mingyiLabels } from './mingyi.ts'
@@ -8,6 +9,8 @@ import { llavaLabels } from './llava.ts'
 import { bakllavaLabels } from './bakllava.ts'
 import { minicpmLabels } from './minicpm.ts'
 import { moondreamLabels } from './moondream.ts'
+import { starlinglmLabels } from './starlinglm.ts'
+import { neuralchatLabels } from './neuralchat.ts'
 import { phiLabels } from './phi.ts'
 import { openchatLabels } from './openchat.ts'
 import { ayaLabels } from './aya.ts'
@@ -41,6 +44,7 @@ import { vitsLabel } from './vits.ts'
 export const aiLabels = [
     qwenLabels,
     gemmaLabels,
+    chatglmLabels,
     llamaLabels,
     internlmLabels,
     mingyiLabels,
@@ -49,6 +53,8 @@ export const aiLabels = [
     bakllavaLabels,
     minicpmLabels,
     moondreamLabels,
+    starlinglmLabels,
+    neuralchatLabels,
     phiLabels,
     openchatLabels,
     ayaLabels,
diff --git a/frontend/src/stores/labels/neuralchat.ts b/frontend/src/stores/labels/neuralchat.ts
new file mode 100644
index 0000000..3e700e4
--- /dev/null
+++ b/frontend/src/stores/labels/neuralchat.ts
@@ -0,0 +1,33 @@
+export const neuralchatLabels = {
+    name: "neural-chat",
+    family: "llama",
+    action: "chat",
+    models: [
+        {
+            model: "neural-chat:latest",
+            params: {
+                stream: true,
+                "num_ctx": 4096,
+                "stop": [
+                    "",
+                    "<|im_start|>",
+                    "<|im_end|>"
+                ],
+                "temperature": 0.7,
+                "top_k": 5,
+                "top_p": 0.8
+            },
+            info: {
+                engine: "ollama",
+                from: "ollama",
+                size: "4.1GB",
+                desk: "5GB",
+                cpu: "16GB",
+                gpu: "8GB",
+                quant: "q4"
+            }
+        },
+    ],
+    zhdesc: "基于Mistral的微调模型,具有良好的领域和语言覆盖率。",
+    endesc: "A fine-tuned model based on Mistral with good coverage of domain and language. "
+}
\ No newline at end of file
diff --git a/frontend/src/stores/labels/starlinglm.ts b/frontend/src/stores/labels/starlinglm.ts
new file mode 100644
index 0000000..d0b43db
--- /dev/null
+++ b/frontend/src/stores/labels/starlinglm.ts
@@ -0,0 +1,33 @@
+export const starlinglmLabels = {
+    name: "starling-lm",
+    family: "llama",
+    action: "chat",
+    models: [
+        {
+            model: "starling-lm:latest",
+            params: {
+                stream: true,
+                "stop": [
+                    "<|endoftext|>",
+                    "<|end_of_turn|>",
+                    "Human:",
+                    "Assistant:"
+                ],
+                "temperature": 0.7,
+                "top_k": 5,
+                "top_p": 0.8
+            },
+            info: {
+                engine: "ollama",
+                from: "ollama",
+                size: "4.1GB",
+                desk: "5GB",
+                cpu: "16GB",
+                gpu: "8GB",
+                quant: "q4"
+            }
+        },
+    ],
+    zhdesc: "Starling是一个大型语言模型,通过人工智能反馈的强化学习进行训练,专注于提高聊天机器人的有用性。",
+    endesc: "Starling is a large language model trained by reinforcement learning from AI feedback focused on improving chatbot helpfulness. "
+}
\ No newline at end of file