diff --git a/install-nvidia-smi.md b/install-nvidia-smi.md
index cdecc07..8596912 100644
--- a/install-nvidia-smi.md
+++ b/install-nvidia-smi.md
@@ -1,4 +1,3 @@
-nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl
 ## RTX 4090 laptop setup notes
 
 ```shell
@@ -18,6 +17,5 @@ conda create -n vllm-py12 python=3.12 -y
 # Keep vllm- and LLM-related files in this directory
 cd /home/ss/vllm-py12
 conda activate vllm-py12
-export https_proxy=http://10.159.236.165:7890 http_proxy=http://10.159.236.165:7890 all_proxy=socks5://10.159.236.165:7891
 pip install vllm -i http://mirrors.cloud.tencent.com/pypi/simple --extra-index-url https://download.pytorch.org/whl/cu128
 ```
\ No newline at end of file
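
Not part of the patch above: a minimal sanity-check sketch one might run in the `vllm-py12` environment after the `pip install vllm` step, now that the proxy export has been dropped. The verification commands are assumptions about how the install would be checked, not something recorded in the original notes.

```shell
# Assumed post-install check (not from the original notes)
conda activate vllm-py12

# Confirm the RTX 4090 and driver are visible
nvidia-smi

# Confirm torch was pulled from the cu128 index and can see the GPU
python -c "import torch; print(torch.__version__, torch.cuda.is_available())"

# Confirm vllm imports cleanly
python -c "import vllm; print(vllm.__version__)"
```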