Containers for CyberShake
This page documents the steps involved in enabling the CyberShake codebase to run in a container environment.
Selection of Containers
The HPC container technologies available at the time of selection were Singularity, Charliecloud, and Shifter. Of these three, Singularity was the most widely adopted and had the most open-source tooling; because of this wide adoption, a Singularity module already existed on the Frontera system. Singularity has built-in support for several MPI libraries, including OpenMPI, MPICH, and Intel MPI. Shifter, although lightweight, relies heavily on the MPICH ABI, which would require site-specific MPI libraries to be copied into the container at runtime.
Installing Singularity
Recommended for people who want to run Singularity locally or create their own custom containers. Use of premade containers does not require installation.
Install Dependencies
sudo apt-get update && sudo apt-get install -y \
    build-essential \
    uuid-dev \
    libgpgme-dev \
    squashfs-tools \
    libseccomp-dev \
    wget \
    pkg-config \
    git \
    cryptsetup-bin
Download Go
export VERSION=1.13.5 OS=linux ARCH=amd64 && \
    wget https://dl.google.com/go/go$VERSION.$OS-$ARCH.tar.gz && \
    sudo tar -C /usr/local -xzvf go$VERSION.$OS-$ARCH.tar.gz && \
    rm go$VERSION.$OS-$ARCH.tar.gz
Set Up Go
echo 'export GOPATH=${HOME}/go' >> ~/.bashrc && \
    echo 'export PATH=/usr/local/go/bin:${PATH}:${GOPATH}/bin' >> ~/.bashrc && \
    source ~/.bashrc
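To confirm that Go is now on your PATH before moving on (a quick sanity check; the version printed will be whatever you downloaded above):
$ go version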
Install Singularity
export VERSION=3.5.2 && # adjust this as necessary \
    wget https://github.com/sylabs/singularity/releases/download/v${VERSION}/singularity-${VERSION}.tar.gz && \
    tar -xzf singularity-${VERSION}.tar.gz && \
    cd singularity
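The commands above only download and unpack the source. A compile-and-install step is still needed; a minimal sketch, following the upstream Singularity 3.x build instructions (and assuming the dependencies from the earlier step are installed), is:
./mconfig && \
    make -C ./builddir && \
    sudo make -C ./builddir install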
Alternative: Install from the Git Repository
git clone https://github.com/sylabs/singularity.git && \
    cd singularity && \
    git checkout v3.5.2
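Check if Singularity Works
Whichever installation route you took, a quick way to verify it (assuming the singularity binary ended up on your PATH) is to print the version:
$ singularity --version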
Setting up a serial container (on your computer)
Get Image: singularity pull <source>
$ singularity build myPythonContainer.sif library://default/ubuntu:latest
- <source> can come from the Singularity Container Library (library://), Singularity Hub (shub://), or Docker Hub (docker://).
Execute a Command from Outside the Container: singularity exec <image> <command>
$ singularity exec myPythonContainer.sif cat /etc/lsb-release
singularity exec <image> <command>
$ singularity exec myPythonContainer.sif python3 helloWorld.py
Find the Size of Cached Container Images:
$ singularity cache list
- Note: On Frontera, Singularity cannot be run on the login nodes; use a compute node.
Basic Singularity Commands
Pull - pulls a container image from a remote source.
$ sudo singularity pull <remote source>
<remote source>:
1. Singularity Container Services [1]
$ sudo singularity pull --name CONTAINER_NAME.sif library://USER/PULL_PATH:VERSION
- Note: the path only needs to match the image's pull card; see the remote site for examples.
2. Singularity Hub [2]
$ sudo singularity pull --name CONTAINER_NAME.sif shub://USER/PULL_PATH:VERSION
- Note: the path only needs to match the image's pull card; see the remote site for examples.
3. Docker Hub [3]
$ sudo singularity build CONTAINER_NAME.sif docker://USER/PULL_PATH:VERSION
- Note 1: Docker images consist of layers that must be merged into a single Singularity image; for that to happen you MUST use build rather than pull.
- Note 2: the path only needs to match the image's pull card; concrete examples are shown below.
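For example (these particular images are only illustrative; the shub image is the MVAPICH container used again in the MPI section below):
$ sudo singularity pull --name ubuntu-mvapich.sif shub://mkandes/ubuntu-mvapich
$ sudo singularity build ubuntu.sif docker://ubuntu:18.04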
Exec - executes an external command inside the container
$ singularity exec IMAGE_NAME.sif EXTERNAL_COMMAND
Shell - shells into an existing container
$ singularity shell IMAGE_NAME.sif
- Note: Your home directory is mounted by default
Run - runs an image. Run executes the runscript that was placed into the container when the image was built from its definition (recipe) file
$ singularity run IMAGE_NAME.sif
Build - builds an image (BIG TO DO: very important; a lot of details and options to document)
$ singularity build IMAGE_NAME.sif <source>
<source> can be:
- another image (either Docker or Singularity)
- a Singularity definition file (formerly known as a recipe file), usually named with a .def extension
Note:
You can shell directly into an image from a Docker URI to explore different containers without pulling or building them first
$ singularity shell docker://ubuntu
Creating Definition Files: (To Do) Set up complex workflows with a definition (recipe) file. Alternatively, prototype the final container in a writable sandbox directory:
$ sudo singularity build --sandbox ubuntu_s docker://ubuntu
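Until this section is filled in, a minimal definition file might look like the following sketch (the base image and packages here are placeholders, loosely mirroring the Python container used later on this page):

Bootstrap: docker
From: ubuntu:18.04

%post
    # commands run inside the container at build time
    apt-get update && apt-get install -y python3

%runscript
    # what "singularity run" executes
    exec python3 "$@"

It would then be built with something like (file names illustrative):
$ sudo singularity build myPythonContainer.sif myPythonContainer.def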
Building or Using Pre-Made Containers
- Frontera
- Summit
- Generic
Containers on Frontera
Serial Containers
1. Prepare
- Make helloWorld.py
$ echo "print(\"Hello World\")" > helloWorld.py
- Install the Singularity module (only if working on a supercomputer):
$ module load tacc-singularity
*Note: run module save if you plan to use Singularity often
2. Get a Singularity Image on Frontera (*Note: if you want to run a particular program, its dependencies must already be installed in the container). Options:
- By copying an image from your local machine to Frontera with scp (see the example after this list)
- By pulling from a compute node:
idev -N 1; singularity pull library://libii/scec/ubuntu18.10-python3:sha256.522b070ad79309ef7526f87c34f0f8518e7d7acc6399aa6372fb0cf28fea25a1
- Note: This command also works in an sbatch file.
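As a sketch of the scp route (the username and destination directory are placeholders; the host name assumes the standard Frontera login address):
$ scp myPythonContainer.sif username@frontera.tacc.utexas.edu:/path/to/your/workdir/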
3-1. Interface with a Compute Node
a. idev session
$ idev
$ ibrun singularity exec ubuntu18.10-python3_latest.sif python3 helloWorld.py
b. sbatch (recommended)
#!/bin/bash
#SBATCH -p development
#SBATCH -t 00:05:00
#SBATCH -n 1
#SBATCH -N 1
#SBATCH -J test-singularity-python
#SBATCH -o test-singularity-python.o%j
#SBATCH -e test-singularity-python.e%j

# Run the actual program
singularity exec ubuntu18.10-python3_latest.sif python3 helloWorld.py
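Assuming the script above is saved as, say, test-singularity-python.sbatch (the file name is illustrative), it is submitted and monitored with the usual Slurm commands:
$ sbatch test-singularity-python.sbatch
$ squeue -u $USER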
3-2. Execute from Local Computer (if Singularity is installed)
$ singularity exec ubuntu18.10-python3_latest.sif python3 helloWorld.py
MPI Containers
1. Make an MPI Program (e.g., named sum_sqrt.c)
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char** argv) {
    // Initialize the MPI environment
    MPI_Init(&argc, &argv);

    // Grab argument N from the command line
    if (argc < 2) {
        fprintf(stderr, "Usage: sum_sqrt N\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    int numN = atoi(argv[1]); // N
    printf("Argument N: %d \n", numN);

    // Get the number of processes
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    printf("Processes: %d \n", world_size);

    // Get the rank of this process
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    // Block decomposition: each rank handles the inclusive range
    // [my_lo, my_hi] of size block = N / world_size; rank 0 also
    // picks up the leftover values when N is not evenly divisible.
    int nglobal = numN;
    int block = nglobal / world_size;
    int my_lo = (world_rank * block) + 1, my_hi = (world_rank + 1) * block;

    if (world_rank == 0) { // master process
        int mySum = 0;
        int pSum = 0;
        int totalSum = 0;

        printf("Main Process Start\n");

        // send the block size to the worker processes
        for (int myprocessor = 1; myprocessor < world_size; myprocessor++) {
            MPI_Send(&block, 1, MPI_INT, myprocessor, 0, MPI_COMM_WORLD);
        }

        // process my own block: 1..block
        for (int i = 1; i <= block; i++) {
            mySum += (i * i);
        }

        // process the leftover values truncated by the integer division
        for (int left_over = block * world_size + 1; left_over <= numN; left_over++) {
            mySum += (left_over * left_over);
        }
        totalSum += mySum;

        // receive partial sums from the worker processes
        for (int myprocessor = 1; myprocessor < world_size; myprocessor++) {
            MPI_Recv(&pSum, 1, MPI_INT, myprocessor, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            totalSum += pSum;
            printf("MpSum: %d\n", pSum);
        }

        // print the final total
        printf("Sum of Squares for %d is %d\n", numN, totalSum);
        printf("Main Process End\n");
    } else { // worker process
        printf("Start Process: %d\n", world_rank);
        int mySum = 0;

        // receive the block size from the master
        MPI_Recv(&block, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

        // calculate my local sum of squares over the inclusive range
        for (int i = my_lo; i <= my_hi; i++) {
            mySum += (i * i);
        }

        // send my partial sum back to the master
        MPI_Send(&mySum, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        printf("End Process: %d\n", world_rank);
    }

    // Finalize the MPI environment
    MPI_Finalize();

    return 0;
}
2. Compile Program
$ mpicc -o sum_sqrt sum_sqrt.c
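The MPI used to compile should match the MPI library inside the container. If the host mpicc does not match, one option (a sketch, assuming the image has already been pulled as in the next step and provides mpicc on its PATH) is to compile through the container itself:
$ singularity exec ubuntu-mvapich_latest.sif mpicc -o sum_sqrt sum_sqrt.c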
3. Build or Pull a Singularity Image with the same MPI library installed inside the container [4]
MVAPICH comes preinstalled in this container:
$ idev -N 1
$ singularity pull shub://mkandes/ubuntu-mvapich
4. Execute your command
$ ibrun singularity exec ubuntu-mvapich_latest.sif ./sum_sqrt 100000
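The MPI run can also be submitted through sbatch, mirroring the serial example above (a sketch; the queue, time limit, task count, and job names are placeholders to adjust for your allocation):

#!/bin/bash
#SBATCH -p development
#SBATCH -t 00:10:00
#SBATCH -N 1
#SBATCH -n 32
#SBATCH -J test-singularity-mpi
#SBATCH -o test-singularity-mpi.o%j
#SBATCH -e test-singularity-mpi.e%j

# Run the MPI program inside the container across all tasks
ibrun singularity exec ubuntu-mvapich_latest.sif ./sum_sqrt 100000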
Resources
- Singularity Guide [5]
- Singularity Repository [6]
- Singularity Container Library [7]
- Singularity Hub [8]
- Docker Hub [9]
TACC - Frontera
- TACC Containers [10] (more geared toward people who are already familiar with Docker containers)