一、Downloading Ollama
Online installation:
On Linux, run: curl -fsSL https://ollama.com/install.sh | sh
Because downloading Ollama this way goes over the public internet, the connection is often unstable and easily times out.
Offline installation:
Step 1: Download the Ollama offline release
On the Linux server, run lscpu to check the CPU architecture.
Then download the matching offline release of Ollama from:
https://github.com/ollama/ollama/releases
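For example, to map the lscpu output to a release asset (the asset names below are the ones used later in this guide; newer releases may package the binaries differently):
lscpu | grep Architecture
# Architecture: x86_64  -> download ollama-linux-amd64
# Architecture: aarch64 -> download ollama-linux-arm64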
Step 2: Download install.sh and modify it
Download address: https://ollama.com/install.sh
Change 1:
Comment out the command that downloads ollama from the internet.
Change 2:
Change the install source to the local binary, and place the offline Ollama binary in the same directory as install.sh.
Final modified version of install.sh:
#!/bin/sh
# This script installs Ollama on Linux.
# It detects the current operating system architecture and installs the appropriate version of Ollama.

set -eu

status() { echo ">>> $*" >&2; }
error() { echo "ERROR $*"; exit 1; }
warning() { echo "WARNING: $*"; }

TEMP_DIR=$(mktemp -d)
cleanup() { rm -rf $TEMP_DIR; }
trap cleanup EXIT

available() { command -v $1 >/dev/null; }
require() {
    local MISSING=''
    for TOOL in $*; do
        if ! available $TOOL; then
            MISSING="$MISSING $TOOL"
        fi
    done
    echo $MISSING
}

[ "$(uname -s)" = "Linux" ] || error 'This script is intended to run on Linux only.'

ARCH=$(uname -m)
case "$ARCH" in
    x86_64) ARCH="amd64" ;;
    aarch64|arm64) ARCH="arm64" ;;
    *) error "Unsupported architecture: $ARCH" ;;
esac

IS_WSL2=false

KERN=$(uname -r)
case "$KERN" in
    *icrosoft*WSL2 | *icrosoft*wsl2) IS_WSL2=true;;
    *icrosoft) error "Microsoft WSL1 is not currently supported. Please upgrade to WSL2 with 'wsl --set-version <distro> 2'" ;;
    *) ;;
esac

VER_PARAM="${OLLAMA_VERSION:+?version=$OLLAMA_VERSION}"

SUDO=
if [ "$(id -u)" -ne 0 ]; then
    # Running as root, no need for sudo
    if ! available sudo; then
        error "This script requires superuser permissions. Please re-run as root."
    fi

    SUDO="sudo"
fi

NEEDS=$(require curl awk grep sed tee xargs)
if [ -n "$NEEDS" ]; then
    status "ERROR: The following tools are required but missing:"
    for NEED in $NEEDS; do
        echo "  - $NEED"
    done
    exit 1
fi

status "Downloading ollama..."
# curl --fail --show-error --location --progress-bar -o $TEMP_DIR/ollama "https://ollama.com/download/ollama-linux-${ARCH}${VER_PARAM}"

for BINDIR in /usr/local/bin /usr/bin /bin; do
    echo $PATH | grep -q $BINDIR && break || continue
done

status "Installing ollama to $BINDIR..."
$SUDO install -o0 -g0 -m755 -d $BINDIR
# $SUDO install -o0 -g0 -m755 $TEMP_DIR/ollama $BINDIR/ollama
$SUDO install -o0 -g0 -m755 ./ollama-linux-amd64 $BINDIR/ollama

install_success() {
    status 'The Ollama API is now available at 127.0.0.1:11434.'
    status 'Install complete. Run "ollama" from the command line.'
}
trap install_success EXIT

# Everything from this point onwards is optional.

configure_systemd() {
    if ! id ollama >/dev/null 2>&1; then
        status "Creating ollama user..."
        $SUDO useradd -r -s /bin/false -U -m -d /usr/share/ollama ollama
    fi
    if getent group render >/dev/null 2>&1; then
        status "Adding ollama user to render group..."
        $SUDO usermod -a -G render ollama
    fi
    if getent group video >/dev/null 2>&1; then
        status "Adding ollama user to video group..."
        $SUDO usermod -a -G video ollama
    fi

    status "Adding current user to ollama group..."
    $SUDO usermod -a -G ollama $(whoami)

    status "Creating ollama systemd service..."
    cat <<EOF | $SUDO tee /etc/systemd/system/ollama.service >/dev/null
[Unit]
Description=Ollama Service
After=network-online.target
[Service]
ExecStart=$BINDIR/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="PATH=$PATH"
[Install]
WantedBy=default.target
EOF
    SYSTEMCTL_RUNNING="$(systemctl is-system-running || true)"
    case $SYSTEMCTL_RUNNING in
        running|degraded)
            status "Enabling and starting ollama service..."
            $SUDO systemctl daemon-reload
            $SUDO systemctl enable ollama

            start_service() { $SUDO systemctl restart ollama; }
            trap start_service EXIT
            ;;
    esac
}

if available systemctl; then
    configure_systemd
fi

# WSL2 only supports GPUs via nvidia passthrough
# so check for nvidia-smi to determine if GPU is available
if [ "$IS_WSL2" = true ]; then
    if available nvidia-smi && [ -n "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
        status "Nvidia GPU detected."
    fi
    install_success
    exit 0
fi

# Install GPU dependencies on Linux
if ! available lspci && ! available lshw; then
    warning "Unable to detect NVIDIA/AMD GPU. Install lspci or lshw to automatically detect and install GPU dependencies."
    exit 0
fi

check_gpu() {
    # Look for devices based on vendor ID for NVIDIA and AMD
    case $1 in
        lspci)
            case $2 in
                nvidia) available lspci && lspci -d '10de:' | grep -q 'NVIDIA' || return 1 ;;
                amdgpu) available lspci && lspci -d '1002:' | grep -q 'AMD' || return 1 ;;
            esac ;;
        lshw)
            case $2 in
                nvidia) available lshw && $SUDO lshw -c display -numeric | grep -q 'vendor: .* \[10DE\]' || return 1 ;;
                amdgpu) available lshw && $SUDO lshw -c display -numeric | grep -q 'vendor: .* \[1002\]' || return 1 ;;
            esac ;;
        nvidia-smi) available nvidia-smi || return 1 ;;
    esac
}

if check_gpu nvidia-smi; then
    status "NVIDIA GPU installed."
    exit 0
fi

if ! check_gpu lspci nvidia && ! check_gpu lshw nvidia && ! check_gpu lspci amdgpu && ! check_gpu lshw amdgpu; then
    install_success
    warning "No NVIDIA/AMD GPU detected. Ollama will run in CPU-only mode."
    exit 0
fi

if check_gpu lspci amdgpu || check_gpu lshw amdgpu; then
    # Look for pre-existing ROCm v6 before downloading the dependencies
    for search in "${HIP_PATH:-''}" "${ROCM_PATH:-''}" "/opt/rocm" "/usr/lib64"; do
        if [ -n "${search}" ] && [ -e "${search}/libhipblas.so.2" -o -e "${search}/lib/libhipblas.so.2" ]; then
            status "Compatible AMD GPU ROCm library detected at ${search}"
            install_success
            exit 0
        fi
    done

    status "Downloading AMD GPU dependencies..."
    $SUDO rm -rf /usr/share/ollama/lib
    $SUDO chmod o+x /usr/share/ollama
    $SUDO install -o ollama -g ollama -m755 -d /usr/share/ollama/lib/rocm
    curl --fail --show-error --location --progress-bar "https://ollama.com/download/ollama-linux-amd64-rocm.tgz${VER_PARAM}" \
        | $SUDO tar zx --owner ollama --group ollama -C /usr/share/ollama/lib/rocm .
    install_success
    status "AMD GPU ready."
    exit 0
fi

# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-7-centos-7
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-8-rocky-8
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-9-rocky-9
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#fedora
install_cuda_driver_yum() {
    status 'Installing NVIDIA repository...'
    case $PACKAGE_MANAGER in
        yum)
            $SUDO $PACKAGE_MANAGER -y install yum-utils
            $SUDO $PACKAGE_MANAGER-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-$1$2.repo
            ;;
        dnf)
            $SUDO $PACKAGE_MANAGER config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-$1$2.repo
            ;;
    esac

    case $1 in
        rhel)
            status 'Installing EPEL repository...'
            # EPEL is required for third-party dependencies such as dkms and libvdpau
            $SUDO $PACKAGE_MANAGER -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-$2.noarch.rpm || true
            ;;
    esac

    status 'Installing CUDA driver...'

    if [ "$1" = 'centos' ] || [ "$1$2" = 'rhel7' ]; then
        $SUDO $PACKAGE_MANAGER -y install nvidia-driver-latest-dkms
    fi

    $SUDO $PACKAGE_MANAGER -y install cuda-drivers
}

# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#ubuntu
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#debian
install_cuda_driver_apt() {
    status 'Installing NVIDIA repository...'
    curl -fsSL -o $TEMP_DIR/cuda-keyring.deb https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-keyring_1.1-1_all.deb

    case $1 in
        debian)
            status 'Enabling contrib sources...'
            $SUDO sed 's/main/contrib/' < /etc/apt/sources.list | $SUDO tee /etc/apt/sources.list.d/contrib.list > /dev/null
            if [ -f "/etc/apt/sources.list.d/debian.sources" ]; then
                $SUDO sed 's/main/contrib/' < /etc/apt/sources.list.d/debian.sources | $SUDO tee /etc/apt/sources.list.d/contrib.sources > /dev/null
            fi
            ;;
    esac

    status 'Installing CUDA driver...'
    $SUDO dpkg -i $TEMP_DIR/cuda-keyring.deb
    $SUDO apt-get update

    [ -n "$SUDO" ] && SUDO_E="$SUDO -E" || SUDO_E=
    DEBIAN_FRONTEND=noninteractive $SUDO_E apt-get -y install cuda-drivers -q
}

if [ ! -f "/etc/os-release" ]; then
    error "Unknown distribution. Skipping CUDA installation."
fi

. /etc/os-release

OS_NAME=$ID
OS_VERSION=$VERSION_ID

PACKAGE_MANAGER=
for PACKAGE_MANAGER in dnf yum apt-get; do
    if available $PACKAGE_MANAGER; then
        break
    fi
done

if [ -z "$PACKAGE_MANAGER" ]; then
    error "Unknown package manager. Skipping CUDA installation."
fi

if ! check_gpu nvidia-smi || [ -z "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
    case $OS_NAME in
        centos|rhel) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -d '.' -f 1) ;;
        rocky) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -c1) ;;
        fedora) [ $OS_VERSION -lt '37' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '37';;
        amzn) install_cuda_driver_yum 'fedora' '37' ;;
        debian) install_cuda_driver_apt $OS_NAME $OS_VERSION ;;
        ubuntu) install_cuda_driver_apt $OS_NAME $(echo $OS_VERSION | sed 's/\.//') ;;
        *) exit ;;
    esac
fi

if ! lsmod | grep -q nvidia || ! lsmod | grep -q nvidia_uvm; then
    KERNEL_RELEASE="$(uname -r)"
    case $OS_NAME in
        rocky) $SUDO $PACKAGE_MANAGER -y install kernel-devel kernel-headers ;;
        centos|rhel|amzn) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE kernel-headers-$KERNEL_RELEASE ;;
        fedora) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE ;;
        debian|ubuntu) $SUDO apt-get -y install linux-headers-$KERNEL_RELEASE ;;
        *) exit ;;
    esac

    NVIDIA_CUDA_VERSION=$($SUDO dkms status | awk -F: '/added/ { print $1 }')
    if [ -n "$NVIDIA_CUDA_VERSION" ]; then
        $SUDO dkms install $NVIDIA_CUDA_VERSION
    fi

    if lsmod | grep -q nouveau; then
        status 'Reboot to complete NVIDIA CUDA driver install.'
        exit 0
    fi

    $SUDO modprobe nvidia
    $SUDO modprobe nvidia_uvm
fi

# make sure the NVIDIA modules are loaded on boot with nvidia-persistenced
if command -v nvidia-persistenced > /dev/null 2>&1; then
    $SUDO touch /etc/modules-load.d/nvidia.conf
    MODULES="nvidia nvidia-uvm"
    for MODULE in $MODULES; do
        if ! grep -qxF "$MODULE" /etc/modules-load.d/nvidia.conf; then
            echo "$MODULE" | sudo tee -a /etc/modules-load.d/nvidia.conf > /dev/null
        fi
    done
fi

status "NVIDIA GPU ready."
install_success
If you see this output, Ollama has been installed successfully.
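To double-check the installation (standard Ollama/systemd commands, shown here as a quick sketch):
ollama --version                 # should print the installed version
systemctl status ollama.service  # the service created by install.sh should be active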
二、Starting Nginx and Deploying the Vue App
Start Nginx:
systemctl start nginx.service
Check Nginx status:
systemctl status nginx.service
Stop Nginx:
systemctl stop nginx.service
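Optionally, to have Nginx start automatically on boot (a standard systemd command, not part of the original steps):
systemctl enable nginx.service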
Edit the sub-configuration file, since that is where the HTTP server block is written.
The Nginx site configuration lives in /etc/nginx/sites-available.
Enter that directory and edit the default file: vim default
index index.html index.htm index.nginx-debian.html;

location / {
    # First attempt to serve request as file, then
    # as directory, then fall back to displaying a 404.
    try_files $uri $uri/ @router;
}

location @router {
    rewrite ^.*$ /index.html last;
}
If your front end is a Vue app that uses vue-router, you need this configuration; otherwise navigating via the router will produce 404 errors.
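After saving the change, it may help to validate the configuration and reload Nginx (standard nginx/systemd commands, not shown in the original write-up):
nginx -t                        # check the configuration syntax
systemctl reload nginx.service  # apply the change without stopping the server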
三、Starting the Python Script
Enter the directory containing the Python script and run: python xxx.py. When the script starts, it may report that some modules are missing; install them as prompted.
Command:
pip install module_name
Note that some of the reported module names are not the package names you should install, for example:
ModuleNotFoundError: No module named 'docx'
If this error appears, do not install a package named docx; install python-docx instead.
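That is, install it with:
pip install python-docx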
After all the required libraries are installed, go to the directory containing the Python scripts and start the entry file. To run it briefly in the foreground: python ai_analysis.py
To keep it running persistently in the background:
nohup python ai_analysis.py > /opt/app/llm_python/ai_analysis_project/log 2>&1 &
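To confirm the process keeps running and to follow its output (standard commands; the log path is the one used above):
ps -ef | grep ai_analysis
tail -f /opt/app/llm_python/ai_analysis_project/log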
四、Libraries Currently Required by the Project
Libraries required for MiniCPM (versions from the environment used in the official tests):
Pillow==10.1.0
torch==2.1.2 (previously installed version: 1.13.0)
torchvision==0.16.2 (previously installed version: 0.17.1)
transformers==4.40.0
sentencepiece==0.1.99
accelerate==0.30.1
bitsandbytes==0.43.1
Libraries required for AI analysis:
langchain
langchain_community
Libraries required for document analysis:
pandasai
python-docx
fitz (imported as fitz, but install the PyMuPDF package)
faiss-gpu (conda install faiss-gpu -c pytorch)
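As a convenience, the pip-installable packages listed above can be installed in one go (a sketch; it assumes these are the standard PyPI names and that fitz is provided by PyMuPDF):
pip install Pillow==10.1.0 torch==2.1.2 torchvision==0.16.2 transformers==4.40.0 sentencepiece==0.1.99 accelerate==0.30.1 bitsandbytes==0.43.1 langchain langchain_community pandasai python-docx PyMuPDF
conda install faiss-gpu -c pytorch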