
Commit

Change /root/ to /home/user/. (opea-project#475)
* Change /root/ to /home/user/.

Signed-off-by: zepan <[email protected]>

* Fix issue.

Signed-off-by: zepan <[email protected]>

---------

Signed-off-by: zepan <[email protected]>
Signed-off-by: siddhivelankar23 <[email protected]>
ZePan110 authored and siddhivelankar23 committed Aug 22, 2024
1 parent 77ad930 commit 8467687
Showing 8 changed files with 20 additions and 20 deletions.
2 changes: 1 addition & 1 deletion comps/dataprep/milvus/config.py
@@ -12,7 +12,7 @@
MILVUS_PORT = int(os.getenv("MILVUS_PORT", 19530))
COLLECTION_NAME = os.getenv("COLLECTION_NAME", "rag_milvus")

- MOSEC_EMBEDDING_MODEL = os.environ.get("MOSEC_EMBEDDING_MODEL", "/root/bce-embedding-base_v1")
+ MOSEC_EMBEDDING_MODEL = os.environ.get("MOSEC_EMBEDDING_MODEL", "/home/user/bce-embedding-base_v1")
MOSEC_EMBEDDING_ENDPOINT = os.environ.get("MOSEC_EMBEDDING_ENDPOINT", "")
os.environ["OPENAI_API_BASE"] = MOSEC_EMBEDDING_ENDPOINT
os.environ["OPENAI_API_KEY"] = "Dummy key"
2 changes: 1 addition & 1 deletion comps/embeddings/langchain-mosec/embedding_mosec.py
@@ -67,7 +67,7 @@ def embedding(input: TextDoc) -> EmbedDoc:
MOSEC_EMBEDDING_ENDPOINT = os.environ.get("MOSEC_EMBEDDING_ENDPOINT", "http://127.0.0.1:8080")
os.environ["OPENAI_API_BASE"] = MOSEC_EMBEDDING_ENDPOINT
os.environ["OPENAI_API_KEY"] = "Dummy key"
MODEL_ID = "/root/bge-large-zh-v1.5"
MODEL_ID = "/home/user/bge-large-zh-v1.5"
embeddings = MosecEmbeddings(model=MODEL_ID)
print("Mosec Embedding initialized.")
opea_microservices["opea_service@embedding_mosec"].start()
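The wrapper reads only MOSEC_EMBEDDING_ENDPOINT from the environment; the model path itself is fixed in the code above. A minimal launch sketch, assuming the service is started directly with Python from the repository root (this invocation is an assumption, not shown in the diff):

```shell
# Sketch only: direct python invocation and the endpoint value are assumptions.
export MOSEC_EMBEDDING_ENDPOINT=http://127.0.0.1:8080
python comps/embeddings/langchain-mosec/embedding_mosec.py
```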
4 changes: 2 additions & 2 deletions comps/embeddings/langchain-mosec/mosec-docker/README.md
@@ -25,13 +25,13 @@ docker run -itd -p 8000:8000 embedding:latest
- Restful API by curl

```shell
- curl -X POST http://127.0.0.1:8000/v1/embeddings -H "Content-Type: application/json" -d '{ "model": "/root/bge-large-zh-v1.5/", "input": "hello world"}'
+ curl -X POST http://127.0.0.1:8000/v1/embeddings -H "Content-Type: application/json" -d '{ "model": "/home/user/bge-large-zh-v1.5/", "input": "hello world"}'
```

- generate embedding from python

```python
DEFAULT_MODEL = "/root/bge-large-zh-v1.5/"
DEFAULT_MODEL = "/home/user/bge-large-zh-v1.5/"
SERVICE_URL = "http://127.0.0.1:8000"
INPUT_STR = "Hello world!"

@@ -4,7 +4,7 @@

from openai import Client

DEFAULT_MODEL = "/root/bge-large-zh-v1.5/"
DEFAULT_MODEL = "/home/user/bge-large-zh-v1.5/"
SERVICE_URL = "http://127.0.0.1:8000"
INPUT_STR = "Hello world!"

@@ -51,7 +51,7 @@ docker run -d --rm --name="vllm-openvino-server" \
-e HTTPS_PROXY=$https_proxy \
-e HTTP_PROXY=$https_proxy \
-e HF_TOKEN=${HUGGINGFACEHUB_API_TOKEN} \
- -v $HOME/.cache/huggingface:/root/.cache/huggingface \
+ -v $HOME/.cache/huggingface:/home/user/.cache/huggingface \
vllm:openvino /bin/bash -c "\
cd / && \
export VLLM_CPU_KVCACHE_SPACE=50 && \
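With the Hugging Face cache now mounted at /home/user/.cache/huggingface inside the container, a quick way to check that downloads still land in the host cache is to list both sides of the bind mount (a sketch, assuming the vllm-openvino-server container above is running and has fetched a model):

```shell
# Sketch only: assumes the "vllm-openvino-server" container from the snippet above is running.
docker exec vllm-openvino-server ls /home/user/.cache/huggingface
ls $HOME/.cache/huggingface   # the host side of the bind mount should show the same entries
```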
6 changes: 3 additions & 3 deletions comps/llms/text-generation/vllm-ray/docker/Dockerfile.vllmray
@@ -6,10 +6,10 @@ FROM vault.habana.ai/gaudi-docker/1.16.0/ubuntu22.04/habanalabs/pytorch-installe

ENV LANG=en_US.UTF-8

- WORKDIR /root/vllm-ray
+ WORKDIR /home/user/vllm-ray

# copy the source code to the package directory
- COPY comps/llms/text-generation/vllm-ray /root/vllm-ray
+ COPY comps/llms/text-generation/vllm-ray /home/user/vllm-ray

RUN pip install --upgrade-strategy eager optimum[habana] && \
pip install git+https://github.com/HabanaAI/[email protected]
@@ -21,7 +21,7 @@ RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/
service ssh restart

ENV no_proxy=localhost,127.0.0.1
- ENV PYTHONPATH=$PYTHONPATH:/root:/root/vllm-ray
+ ENV PYTHONPATH=$PYTHONPATH:/root:/home/user/vllm-ray

# Required by DeepSpeed
ENV RAY_EXPERIMENTAL_NOSET_HABANA_VISIBLE_MODULES=1
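Because the COPY instruction above resolves comps/llms/text-generation/vllm-ray against the build context, the image is built from the repository root. A minimal build sketch, with an assumed image tag (not part of this commit):

```shell
# Sketch only: the tag "vllm-ray:habana" is an assumption; run from the repository root.
docker build -f comps/llms/text-generation/vllm-ray/docker/Dockerfile.vllmray -t vllm-ray:habana .
```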
20 changes: 10 additions & 10 deletions comps/llms/text-generation/vllm-xft/docker/Dockerfile
@@ -58,13 +58,13 @@ RUN cmake .. -DCMAKE_INSTALL_PREFIX=/usr/local/oneCCL \

RUN echo "source /usr/local/oneCCL/env/setvars.sh" >> ~/.bashrc

- WORKDIR /root/
+ WORKDIR /home/user/
RUN rm -rf /tmp/oneCCL

RUN git clone https://github.com/intel/xFasterTransformer.git

SHELL ["/bin/bash", "-c"]
- WORKDIR /root/xFasterTransformer
+ WORKDIR /home/user/xFasterTransformer
RUN git checkout ${TAG} \
&& export "LD_LIBRARY_PATH=/usr/local/mklml_lnx_2019.0.5.20190502/lib:$LD_LIBRARY_PATH" \
&& export "PATH=/usr/bin/python3.8:$PATH" \
@@ -75,23 +75,23 @@ RUN git checkout ${TAG} \
&& pip install --no-cache-dir dist/*

RUN mkdir -p /usr/local/xft/lib \
&& cp /root/xFasterTransformer/build/libxfastertransformer.so /usr/local/xft/lib \
&& cp /root/xFasterTransformer/build/libxft_comm_helper.so /usr/local/xft/lib \
&& cp -r /root/xFasterTransformer/include /usr/local/xft/ \
&& cp /home/user/xFasterTransformer/build/libxfastertransformer.so /usr/local/xft/lib \
&& cp /home/user/xFasterTransformer/build/libxft_comm_helper.so /usr/local/xft/lib \
&& cp -r /home/user/xFasterTransformer/include /usr/local/xft/ \
&& mkdir -p /usr/local/include/xft/ \
&& ln -s /usr/local/xft/include /usr/local/include/xft/include

RUN echo "export \$(python -c 'import xfastertransformer as xft; print(xft.get_env())')" >> ~/.bashrc

- COPY comps /root/comps
+ COPY comps /home/user/comps

RUN pip install --no-cache-dir --upgrade pip && \
- pip install --no-cache-dir -r /root/comps/llms/text-generation/vllm-xft/requirements.txt
+ pip install --no-cache-dir -r /home/user/comps/llms/text-generation/vllm-xft/requirements.txt

ENV PYTHONPATH=$PYTHONPATH:/root

- RUN chmod +x /root/comps/llms/text-generation/vllm-xft/run.sh
+ RUN chmod +x /home/user/comps/llms/text-generation/vllm-xft/run.sh

- WORKDIR /root/comps/llms/text-generation/vllm-xft/
+ WORKDIR /home/user/comps/llms/text-generation/vllm-xft/

- ENTRYPOINT ["/root/comps/llms/text-generation/vllm-xft/run.sh"]
+ ENTRYPOINT ["/home/user/comps/llms/text-generation/vllm-xft/run.sh"]
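Since WORKDIR and ENTRYPOINT now both resolve under /home/user, a quick sanity check against a built image is to list the relocated entrypoint script (the image tag is an assumption, not part of this commit):

```shell
# Sketch only: "vllm-xft:latest" is an assumed tag for an image built from this Dockerfile.
docker run --rm --entrypoint ls vllm-xft:latest -l /home/user/comps/llms/text-generation/vllm-xft/run.sh
```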
2 changes: 1 addition & 1 deletion comps/retrievers/langchain/milvus/config.py
@@ -16,4 +16,4 @@
MOSEC_EMBEDDING_ENDPOINT = os.environ.get("MOSEC_EMBEDDING_ENDPOINT", "")
os.environ["OPENAI_API_BASE"] = MOSEC_EMBEDDING_ENDPOINT
os.environ["OPENAI_API_KEY"] = "Dummy key"
MODEL_ID = "/root/bce-embedding-base_v1"
MODEL_ID = "/home/user/bce-embedding-base_v1"
