From 6255434c530220f246c6adfea5ab251a07b2a261 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Mon, 9 Sep 2024 20:21:00 +0000
Subject: [PATCH] Deployed 846d182 with MkDocs version: 1.6.1

---
 RANs/oai/index.html      | 125 +++++++++++++++++++++++++++++++++++++++
 search/search_index.json |   2 +-
 sitemap.xml.gz           | Bin 127 -> 127 bytes
 3 files changed, 126 insertions(+), 1 deletion(-)

diff --git a/RANs/oai/index.html b/RANs/oai/index.html
index 0ea2f28..e716bb5 100644
--- a/RANs/oai/index.html
+++ b/RANs/oai/index.html
@@ -579,6 +579,48 @@
$ sudo ./nr-softmodem -O /path-to/gnb.conf --sa -E --continuous-tx | tee oai.logs
+
+The first step is to configure both the centralized unit (CU) and the distributed unit (DU). The modified configurations that we used can be found here
+...
+local_s_if_name = "enp3s0"; #set this to your network interface
+local_s_address = "191.4.205.149"; #use 127.0.0.4 with lo interface if trying to run both units in the same computer.
+remote_s_address = "191.4.204.52"; #if you are trying to run more than one DU set this to 0.0.0.0 (any).
+...
+amf_ip_address = ( {
+ ipv4 = "192.168.70.132";
+ ipv6 = "192:168:30::17";
+ active = "yes";
+ preference = "ipv4";
+});
+
+ NETWORK_INTERFACES :
+ {
+ GNB_INTERFACE_NAME_FOR_NG_AMF = "enp3s0";
+ GNB_IPV4_ADDRESS_FOR_NG_AMF = "191.4.205.149/23";
+ GNB_INTERFACE_NAME_FOR_NGU = "enp3s0";
+ GNB_IPV4_ADDRESS_FOR_NGU = "191.4.205.149/23";
+ GNB_PORT_FOR_S1U = 2152; # Spec 2152
+ };
+ }
+
...
+gNB_ID = 0xe00; #should be the same as the one defined on CU config.
+gNB_DU_ID = 0xe00; #change this if running more than one DU.
+...
+//////// Physical parameters:
+min_rxtxtime = 6;
+...
+local_n_if_name = "enp3s0"; #change this to your interface of choice.
+local_n_address = "191.4.204.149"; #if running more than one DU change this to something different from the other DUs.
+remote_n_address = "191.4.204.174"; #change this to your CU ip addr, if local 127.0.0.4.
+
$ sudo ./nr-softmodem -O ~/cu.conf --sa -E --continuous-tx
+$ sudo ./nr-softmodem -O ~/du.conf --sa -E --continuous-tx
diff --git a/search/search_index.json b/search/search_index.json
--- a/search/search_index.json
+++ b/search/search_index.json
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Interoperabilidade","text":""},{"location":"#interoperabilidade","title":"Interoperabilidade","text":""},{"location":"#sections","title":"Sections:","text":""},{"location":"#rans","title":"RANs:","text":"The authors would like to thank the S\u00e3o Paulo Research Foundation (FAPESP) for their support through the projects PORVIR-5G: Programmability, Orchestration, and Virtualization of Networks in 5G, grant number 2020/05182-3, and SAMURAI: intelligent 5G core and integration of multiple access networks, grant number 20/05127-2.
The opinions, hypotheses, conclusions, or recommendations expressed in this material are the responsibility of the author(s) and do not necessarily reflect the views of FAPESP.
"},{"location":"paper/","title":"Published Paper","text":""},{"location":"paper/#published-paper","title":"Published Paper","text":""},{"location":"Core-Network/free5gc/","title":"Free5GC","text":""},{"location":"Core-Network/free5gc/#free5gc","title":"Free5GC","text":""},{"location":"Core-Network/free5gc/#1-set-up","title":"1. Set Up","text":"In our test we used the version v3.3.0.
"},{"location":"Core-Network/free5gc/#11-core-host-configurations","title":"1.1 Core-host Configurations","text":"sudo sysctl net.ipv4.conf.all.forwarding=1\nsudo iptables -P FORWARD ACCEPT\n
"},{"location":"Core-Network/free5gc/#12-clone-free5gc-compose","title":"1.2 Clone free5gc-compose","text":"git clone https://github.com/free5gc/free5gc-compose\ngit checkout v3.3.0\ngit submodule update --init --recursive\n
"},{"location":"Core-Network/free5gc/#13-changing-core-settings","title":"1.3 Changing Core Settings","text":"git clone https://github.com/eduardoschulz/Interoperabilidade.git\ncd Interoperabilidade/core-networks/free5gc/core-networks/FREE5GC\ncp -r config path/to/free5gc-compose \n
"},{"location":"Core-Network/free5gc/#14-gnb-host-configurations","title":"1.4 gNB-host Configurations","text":"You also must configure a route to the internal docker network so that the gNB can make a connection.
sudo ip route add 10.100.200.0/24 via {ip_addr_corehost} dev {interface}\n\n#example\nsudo ip route add 10.100.200.0/24 via 191.4.205.38 dev br01\n
"},{"location":"Core-Network/free5gc/#20-installing-gtp-u-kernel-module","title":"2.0 Installing GTP-U Kernel Module","text":"git clone https://github.com/free5gc/gtp5g.git && cd gtp5g\nmake clean && make\nsudo make install\n
"},{"location":"Core-Network/free5gc/#30-deploying-the-core-network","title":"3.0 Deploying the Core Network","text":"cd path-to/free5gc-compose\ndocker compose up -d\n
docker compose down \n
To set up your UEs you'll need to go to the free5gc webpage on your machine. You should see a login screen when accessing http://:3000. The credentials are admin and the password is free5gc.
"},{"location":"Core-Network/free5gc/#40-more-information","title":"4.0 More Information","text":"Free5GC - Github Page Free5GC - Compose Free5GC - Forum
"},{"location":"Core-Network/oaicn/","title":"OAI - Core Network","text":""},{"location":"Core-Network/oaicn/#oai-core-network","title":"OAI - Core Network","text":""},{"location":"Core-Network/oaicn/#1-set-up","title":"1. Set Up","text":"For our test the version 1.5.0 was used.
"},{"location":"Core-Network/oaicn/#11-core-host-configurations","title":"1.1 Core-host Configurations","text":"sudo sysctl net.ipv4.conf.all.forwarding=1\nsudo iptables -P FORWARD ACCEPT\n
"},{"location":"Core-Network/oaicn/#12-clone-oai-cn5g-fed","title":"1.2 Clone oai-cn5g-fed","text":"git clone https://gitlab.eurecom.fr/oai/cn5g/oai-cn5g-fed.git\ngit checkout v1.5.0\n
"},{"location":"Core-Network/oaicn/#13-changing-core-settings","title":"1.3 Changing Core Settings","text":"git clone https://github.com/eduardoschulz/Interoperabilidade.git\ncd Interoperabilidade/core-networks/OAI-CN/\n\n\nmv docker-compose-basic-nrf.yaml ~/oai-cn5g-fed/docker-compose/\n
"},{"location":"Core-Network/oaicn/#14-gnb-host-configurations","title":"1.4 gNB-host Configurations","text":"You also must configure a route to the internal docker network so that the gNB can make a connection.
sudo ip route add 192.168.70.128/26 via {ip_addr_corehost} dev {interface}\n\n#example\nsudo ip route add 192.168.70.128/26 via 191.4.205.38 dev br01\n
"},{"location":"Core-Network/oaicn/#20-deploying-the-core-network","title":"2.0 Deploying the Core Network","text":"cd path-to/oai-cn5g-fed/docker-compose\npython3 core-networks.py --type start-basic --scenario 1\n
python3 core-networks.py --type stop-basic --scenario 1\n
docker exec -it mysql bash\nmysql -u root -p\n\npassword: linux \n
use oai_db;\nINSERT INTO `AuthenticationSubscription` (`ueid`, `authenticationMethod`, `encPermanentKey`, `protectionParameterId`, `sequenceNumber`, `authenticationManagementField`, `algorithmId`, `encOpcKey`, `encTopcKey`, `vectorGenerationInHss`, `n5gcAuthMethod`, `rgAuthenticationInd`, `supi`) VALUES\n('001010123456789', '5G_AKA', '41B7157E3337F0ADD8DA89210D89E17F', '41B7157E3337F0ADD8DA89210D89E17F', '{\\\"sqn\\\": \\\"000000000020\\\", \\\"sqnScheme\\\": \\\"NON_TIME_BASED\\\", \\\"lastIndexes\\\": {\\\"ausf\\\": 0}}', '8000', 'milenage', '1CD638FC96E02EBD35AA0D41EB6F812F', NULL, NULL, NULL, NULL, '001010123456789');\n\nINSERT INTO `SessionManagementSubscriptionData` (`ueid`, `servingPlmnid`, `singleNssai`, `dnnConfigurations`) VALUES\n('001010123456789', '00101', '{\\\"sst\\\": 222, \\\"sd\\\": \\\"123\\\"}','{\\\"default\\\":{\\\"pduSessionTypes\\\":{ \\\"defaultSessionType\\\": \\\"IPV4\\\"},\\\"sscModes\\\": {\\\"defaultSscMode\\\": \\\"SSC_MODE_1\\\"},\\\"5gQosProfile\\\": {\\\"5qi\\\": 6,\\\"arp\\\":{\\\"priorityLevel\\\": 1,\\\"preemptCap\\\": \\\"NOT_PREEMPT\\\",\\\"preemptVuln\\\":\\\"NOT_PREEMPTABLE\\\"},\\\"priorityLevel\\\":1},\\\"sessionAmbr\\\":{\\\"uplink\\\":\\\"1000Mbps\\\", \\\"downlink\\\":\\\"1000Mbps\\\"},\\\"staticIpAddress\\\":[{\\\"ipv4Addr\\\": \\\"12.1.1.4\\\"}]}}');\n
The configuration above is only going to last until the core is restarted. If you want to make this static you must make changes on the db files inside path/oaicn/docker-compose/databases/. If you want to use ours:
rm -r ~/oai-cn5g-fed/docker-compose/database\ncp -r database ~/oai-cn5g-fed/docker-compose/database\n
"},{"location":"Core-Network/oaicn/#30-more-information","title":"3.0 More Information","text":"Basic Deployment using Docker Compose
"},{"location":"Core-Network/open5gs/","title":"Open5GS","text":""},{"location":"Core-Network/open5gs/#open5gs","title":"Open5GS","text":""},{"location":"Core-Network/open5gs/#1-set-up","title":"1. Set Up","text":"In our test we used the version v2.7.0
"},{"location":"Core-Network/open5gs/#11-core-host-configurations","title":"1.1 Core-host Configurations","text":"sudo sysctl net.ipv4.conf.all.forwarding=1\nsudo iptables -P FORWARD ACCEPT\n
"},{"location":"Core-Network/open5gs/#12-clone-docker_open5gs","title":"1.2 Clone docker_open5gs","text":"git clone https://github.com/herlesupreeth/docker_open5gs\ngit checkout v2.7.0\n
"},{"location":"Core-Network/open5gs/#13-changing-core-settings","title":"1.3 Changing Core Settings","text":"git clone https://github.com/eduardoschulz/Interoperabilidade.git\ncd Interoperabilidade/core-networks/OPEN5GS\ncp sa-deploy.yaml /path/to/docker_open5gs/.\ncp .env /path/to/docker_open5gs/. \n\ncp -r smf/ /path/to/docker_open5gs/ #here you need to modify your dnn \ncp -r upf/ /path/to/docker_open5gs/ #same thing as above\n
"},{"location":"Core-Network/open5gs/#20-deploying-the-core-network","title":"2.0 Deploying the Core Network","text":"cd path-to/docker_open5gs\ndocker compose -f sa-deploy.yaml up -d\n
docker compose down \n
To set up your UEs you'll need to go to the open5gs webpage on your machine. You should see a login screen when accessing http://:3000. The credentials are admin and the password is 1423.
"},{"location":"Core-Network/open5gs/#40-more-information","title":"4.0 More Information","text":"Open5GS - Docker Open5GS - Documentation Open5GS - Github Page srsRAN - Docker; Gradiant - Open5gs in k8s.
"},{"location":"RANs/oai/","title":"OpenAirInterface","text":""},{"location":"RANs/oai/#openairinterface","title":"OpenAirInterface","text":"For our testing we used the 2.1 release of the project.
"},{"location":"RANs/oai/#how-to-build","title":"How to Build","text":"In this setup we have used OpenAirInterface built with the --build-e2 flag.
"},{"location":"RANs/oai/#0-required-dependencies","title":"0. Required dependencies","text":""},{"location":"RANs/oai/#01-building-swig","title":"0.1 Building Swig","text":"$ git clone https://github.com/swig/swig.git && cd swig\n$ git checkout release-4.2\n$ ./autogen.sh\n$ ./configure --prefix=/usr/\n$ make -j$(nproc)\n$ sudo make install\n
"},{"location":"RANs/oai/#02-installing-other-dependencies","title":"0.2 Installing other dependencies","text":"$ sudo apt install libsctp-dev python3 cmake-curses-gui libpcre2-dev\n
"},{"location":"RANs/oai/#1-building-openairinterface","title":"1. Building OpenAirInterface","text":"$ git clone https://gitlab.eurecom.fr/oai/openairinterface5g oai\n$ cd oai\n$ git checkout v2.1.0\n$ ./build_oai -w USRP --gNB --nrUE --build-e2 \n
"},{"location":"RANs/oai/#2-building-flexric","title":"2. Building Flexric","text":"$ cd oai/openair2/E2AP/flexric\n$ mkdir build && cd build\n$ cmake -DSWIG_DIR=/usr/share/swig/4.2.0/ -DSWIG_EXECUTABLE=/usr/bin/swig -DCMAKE_C_COMPILER=gcc-10 -DCMAKE_CXX_COMPILER=g++-10 ..\n$ make -j$(nproc)\n$ sudo make install \n
"},{"location":"RANs/oai/#3-launching-gnb","title":"3. Launching gNB","text":""},{"location":"RANs/oai/#31-gnb-configuration","title":"3.1 gNB configuration","text":"First some modifications on the configuration file are required to make the gNB work properly. Some of the configurations bellow are optional depending on your setup.
$ cd /path-to/openairinterface5g/targets/PROJECTS/GENERIC-NR-5GC/CONF\n$ vi gnb.sa.band78.fr1.106PRB.usrpb210.conf #in this case we're using the usrp b210.\n
tracking_area_code = 1;\nplmn_list = ({ mcc = 001; mnc = 01; mnc_length = 2; snssaiList = ({ sst = 1; }) }); #in this case we are using the test plmn 00101\n...\nmin_rxtxtime = 6;\n...\namf_ip_address = ( { ipv4 = \"191.4.205.169\"; #change this to our amf ip; default for oai cn: 192.168.70.132\n ipv6 = \"192:168:30::17\";\n active = \"yes\";\n preference = \"ipv4\";\n }\n );\n\n\nNETWORK_INTERFACES :\n {\n GNB_INTERFACE_NAME_FOR_NG_AMF = \"br01\"; #change to our host machine network interface of choice\n GNB_IPV4_ADDRESS_FOR_NG_AMF = \"191.4.204.211\"; #change to the ip addr of the interface selected \n GNB_INTERFACE_NAME_FOR_NGU = \"br01\"; #change to our host machine network interface of choice\n GNB_IPV4_ADDRESS_FOR_NGU = \"191.4.204.211\"; #change to the ip addr of the interface selected \n GNB_PORT_FOR_S1U = 2152;\n };\n...\n\ne2_agent = {\n near_ric_ip_addr = \"191.4.204.161\" #change to to the ip addr of the ric. If you are running flexric locally --> 127.0.0.1\n sm_dir = \"/usr/local/lib/flexric/\"\n};\n
"},{"location":"RANs/oai/#32-running-nr-softmodem","title":"3.2 Running nr-softmodem","text":"$ sudo ./nr-softmodem -O /path-to/gnb.conf --sa -E --continuous-tx | tee oai.logs\n
"},{"location":"RANs/oai/#40-f1-2-split","title":"4.0 F1 / 2 Split","text":"First step is to configure both the centralized unit and the distributed unit. The modified configuration that we used can be found here
"},{"location":"RANs/oai/#41-configuration-changes-for-the-cu","title":"4.1 Configuration changes for the CU","text":"...\nlocal_s_if_name = \"enp3s0\"; #set this to your network interface\nlocal_s_address = \"191.4.205.149\"; #use 127.0.0.4 with lo interface if trying to run both units in the same computer.\nremote_s_address = \"191.4.204.52\"; #if you are trying to run more than one DU set this to 0.0.0.0 (any).\n...\namf_ip_address = ( {\n ipv4 = \"192.168.70.132\";\n ipv6 = \"192:168:30::17\";\n active = \"yes\";\n preference = \"ipv4\";\n});\n\n NETWORK_INTERFACES :\n {\n GNB_INTERFACE_NAME_FOR_NG_AMF = \"enp3s0\";\n GNB_IPV4_ADDRESS_FOR_NG_AMF = \"191.4.205.149/23\";\n GNB_INTERFACE_NAME_FOR_NGU = \"enp3s0\";\n GNB_IPV4_ADDRESS_FOR_NGU = \"191.4.205.149/23\";\n GNB_PORT_FOR_S1U = 2152; # Spec 2152\n };\n }\n
"},{"location":"RANs/oai/#42-configuration-changes-for-the-du","title":"4.2 Configuration changes for the DU","text":"...\ngNB_ID = 0xe00; #should be the same as the one defined on CU config.\ngNB_DU_ID = 0xe00; #change this if running more than one DU.\n...\n//////// Physical parameters:\nmin_rxtxtime = 6;\n...\nlocal_n_if_name = \"enp3s0\"; #change this to your interface of choice.\nlocal_n_address = \"191.4.204.149\"; #if running more than one DU change this to something different from the other DUs. \nremote_n_address = \"191.4.204.174\"; #change this to your CU ip addr, if local 127.0.0.4.\n
"},{"location":"RANs/oai/#43-launching-both-cu-and-du","title":"4.3 Launching both CU and DU","text":"$ sudo ./nr-softmodem -O ~/cu.conf --sa -E --continuous-tx\n$ sudo ./nr-softmodem -O ~/du.conf --sa -E --continuous-tx\n
"},{"location":"RANs/srsran/","title":"SRSRAN","text":""},{"location":"RANs/srsran/#srsran","title":"SRSRAN","text":""},{"location":"RANs/srsran/#1-building","title":"1. Building","text":"Version: 23.10.1 UHD version: 4.6.0.0 OS version: Ubuntu Server 22.04 LTS
For building we recommend following the official documentation available here to build srsRAN from source. The UHD driver was used and built from sources following this guide.
"},{"location":"RANs/srsran/#2-running","title":"2. Running","text":"After building and installing srsRAN we recommend running the srsran_performance script available in the official srsran_project repository. Since these changes do not persist remember to re-run them if the machine is restarted.
Watchout as srsRAN requires the bind address to be specified whenever trying to connect to an external machine.
Finnaly, free5gc and OAI CN require a change in routing table of the gNB host. This change can be applied with the commands:
Free5Gc: sudo ip route add 10.100.200.0/24 via {external addr of the core host} dev {name of the network interface used to reach the core host}
.
OAI CN: sudo ip route add 10.100.200.0/26 via {external addr of the core host} dev {name of the network interface used to reach the core host}
.
We installed the non rt ric using kubernetes and containers, to do so we used kubeadm and containerd.
We've started with a machine running Ubuntu Desktop 22.04 LTS and installed docker following their own documentation.
For our cgroup driver we chose systemd. Our configuration file can be found here. Make sure to install kubectl as well. As our pod network add-on, we picked flannel. Finally we removed the node-role.kubernetes.io/control-plane:NoSchedule
taint from all nodes.
Now we should have a healthy kubernetes cluster running so its time to deploy the NearRTRIC itself.
"},{"location":"RICs/flexric/","title":"Flexric","text":""},{"location":"RICs/flexric/#flexric","title":"Flexric","text":""},{"location":"RICs/flexric/#0-required-dependencies","title":"0. Required dependencies","text":""},{"location":"RICs/flexric/#01-prerequisites","title":"0.1 Prerequisites","text":"$ git clone https://github.com/swig/swig.git && cd swig\n$ git checkout release-4.1\n$ ./autogen.sh\n$ ./configure --prefix=/usr/\n$ make -j$(nproc)\n$ sudo make install\n
"},{"location":"RICs/flexric/#02-other-dependencies","title":"0.2 Other dependencies","text":"$ sudo apt install libsctp-dev python3 cmake-curses-gui libpcre2-dev python3-dev\n
"},{"location":"RICs/flexric/#10-building-flexric","title":"1.0 Building Flexric","text":"$ git clone https://gitlab.eurecom.fr/mosaic5g/flexric.git\n$ git checkout <*version>\n
*For an oai install refer back to oai-flexric. More information can be found at.
$ cd flexric\n$ mkdir build && cd build\n$ cmake -DSWIG_DIR=/usr/share/swig/4.1.0/ -DSWIG_EXECUTABLE=/usr/bin/swig -DCMAKE_C_COMPILER=gcc-10 -DCMAKE_CXX_COMPILE=g++-10 ..\n$ make -j$(nproc)\n$ sudo make install\n
"},{"location":"RICs/flexric/#20-running-flexric","title":"2.0 Running Flexric","text":""},{"location":"RICs/flexric/#21-nearrt-ric","title":"2.1 NearRT-RIC","text":"$ cd build/examples/ric/\n$ ./nearRT-RIC # you can use -c to specify a config file\n
"},{"location":"RICs/flexric/#22-running-a-xapp","title":"2.2 Running a xApp","text":"$ cd build/examples/c/monitor/\n$ ./xapp_kpm_moni\n
"},{"location":"RICs/oran-sc/","title":"Near RealTime RIC","text":""},{"location":"RICs/oran-sc/#near-realtime-ric","title":"Near RealTime RIC","text":""},{"location":"RICs/oran-sc/#1-installation","title":"1. Installation","text":""},{"location":"RICs/oran-sc/#11-containerd-and-kubernetes-skip-if-already-installed","title":"1.1. Containerd and kubernetes (skip if already installed)","text":"First things first install containerd, kubeadm and kubectl. The cluster configuration passed to kubeadm is available here and the only required modification is under the \"CertSANs\" option you should change to the hostname of your machine. This modification ensures that the self-signed certificates generated by kubeadm will remain valid for your machine after a change in IP address, which is common for us, as long as you as acessing it using its hostname.
Note: This will install kubernetes using containerd instead of docker, so the cli for your containers is ctr
and the namespace used by kubernetes is k8s.io
Create the kubernetes cluster using the config.yaml provided.
kubeadm init --config config.yaml\n
Install flannel for networking to work
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml\n
Create namespaces in kubernetes
kubectl create ns ricplt\nkubectl create ns ricxapp\n
Remove master taint so everything can run on the same machine
kubectl taint nodes --all node-role.kubernetes.io/control-plane-\n
"},{"location":"RICs/oran-sc/#13-installing-the-near-rt-ric","title":"1.3. Installing the Near-RT RIC","text":"Clone O-RAN SCs ric-dep
repository
git clone \"https://gerrit.o-ran-sc.org/r/ric-plt/ric-dep\"\n
Start a chartsmuseum instance (this is a repository for helm charts).
chartmuseum --debug --port 6873 --storage local --storage-local-rootdir $HOME/helm/chartsmuseum/\n
Add a plugin to allow helm to push charts to its chartmuseums
helm plugin install https://github.com/chartmuseum/helm-push\n
Add the local museum to helm
helm repo add local http://localhost:6873/\nhelm repo list\n
Build the helm charts and upload them to the local chartsmuseum
cd helm/charts\nmake nearrtric\n
Install the RIC using the provided in the ric-dep repository
helm install nearrtric -n ricplt local/nearrtric -f RECIPE_EXAMPLE/example_recipe_oran_h_release.yaml\n
"},{"location":"RICs/oran-sc/#2-building-xapps","title":"2. Building xApps","text":"Here are the instructions to build the modified version of the bouncer xApp used in tests (the one that works with srsRAN 23.10.1). First clone the repository:
git clone https://github.com/gckopper/bouncer-xapp -b srsRAN-E2 --depth 1\n
Note: the -b
flag allows you to clone a specific branch, in this case the srsRAN-E2
branch, and the --depth 1
flag tells git to only clone the latest commit. Then there is a script inside the repository that builds the container image using docker and saves it to containerd internal repository. Building images with ctr is not possible at the moment. Once the image has finished build it is time to deploy it.
"},{"location":"RICs/oran-sc/#3-deploying-and-managing-xapps","title":"3. Deploying and managing xApps","text":"To deploy the container image, first clone the official appmgr repository from O-RAN.
git clone https://gerrit.o-ran-sc.org/r/ric-plt/appmgr\n
This application is used to manage xApps. So lets build it and install it. First we need to go inside the correct folder
cd appmgr/xapp_orchestrater/dev/xapp_onboarder\n
Create a python virtual environmente to isolate the dependencies of the xApp Onboarder from your system. Just remember that to use this application you'll need to activate the environment using the second command.
python3 -m venv venv3\nsource venv/bin/activate\npip install -r requirements.txt\n
If you need to deactivate this virtual environment simply use the command deativate
. Onboard the xApp. Keep in mind that the typo in shcema
is necessary.
CHART_REPO_URL=http://localhost:6873 dms_cli onboard --config-file-path <path-to-bouncer-xapp>/Bouncer/init/config-file.json --shcema_file_path <path-to-bouncer-xapp>/Bouncer/init/schema.json\n
Download the chart you've just created.
CHART_REPO_URL=http://localhost:6873 dms_cli download_helm_chart bouncer-xapp 2.0.0\n
Install the xApp with helm.
helm install bouncer-xapp -n ricxapp bouncer-xapp-2.0.0.tgz\n
Once installed you can start and stop the application by scaling its deployment in kubernetes.
# start xapp\nkubectl scale --replicas=1 -n ricxapp deployment ricxapp-bouncer-xapp\n# stop xapp\nkubectl scale --replicas=0 -n ricxapp deployment ricxapp-bouncer-xapp\n
Note: When developing or changing the code for the xApp you may need to update the running version of the container image. To do so re-build the image using the script as that will update the image in your local containerd repository. Then simply scale the xApp deployment down and up and the new version should be running. This will only work if the version stays the same.
"},{"location":"UE/simcard/","title":"SIM cards","text":""},{"location":"UE/simcard/#sim-cards","title":"SIM cards","text":"The SIM cards user are made by osmocom so we used their tool to flash new identities to the cards.
git clone https://github.com/osmocom/pysim\ncd pysim\nsudo apt-get install --no-install-recommends \\\n pcscd libpcsclite-dev \\\n python3 \\\n python3-setuptools \\\n python3-pyscard \\\n python3-pip\npip3 install -r requirements.txt\n
The values in the card used in our testing was flashed using the following command (note: the -a
part is your ADM-KEY and it will differ from ours)
./pySim-prog.py -p0 -s 8988211000000689615 --mcc=001 --mnc=01 -a 77190612 --imsi=001010123456789 -k 41B7157E3337F0ADD8DA89210D89E17F --opc=1CD638FC96E02EBD35AA0D41EB6F812F\n
We also had a seconf simcard with the following configuration
imsi: 901700000028080\nk: 724d0f31f2259622700437430b7b5c6e\nopc: 1140620b2805d84b44643bfcfbe6218c\n
"}]}
\ No newline at end of file
diff --git a/sitemap.xml.gz b/sitemap.xml.gz
index 97855f6eb80e027b556edf8565026ebec220a794..2f826fd3f9a729daf509f2f38bf6078e6fb6580e 100644
GIT binary patch
delta 13
Ucmb=gXP58h;ApVAH<7&p039U+XaE2J
delta 13
Ucmb=gXP58h;9y|5I+48s02to`r2qf`