Upload ex6 and ex7

This commit is contained in:
jakob.schratter 2026-01-04 20:15:55 +01:00
commit 6c2d96ff4d
44 changed files with 15291 additions and 0 deletions

154
ex7/ex7_2/GCC_default.mk Normal file
View file

@ -0,0 +1,154 @@
# Basic definitions for using the GNU compiler suite (MPI wrapper compilers).
# Requires setting of COMPILER=GCC_
# Start make as follows to avoid warnings caused by OpenMPI code:
#   make 2>&1 | grep -v openmpi
MPI_ROOT = /usr/bin/
CC     = ${MPI_ROOT}mpicc
CXX    = ${MPI_ROOT}mpicxx
F77    = ${MPI_ROOT}mpif77
LINKER = ${CXX}

# If 'mpirun ...' reports an error "... not enough slots ..." then use the option '--oversubscribe'
MPIRUN = ${MPI_ROOT}mpirun --oversubscribe -display-map
#MPIRUN = ${MPI_ROOT}mpiexec
# 2023, Oct 23: "WARNING: There is at least one non-excluded OpenFabrics device found,"
# solution according to https://github.com/open-mpi/ompi/issues/11063
MPIRUN += -mca btl ^openib

# KFU:sauron
CXXFLAGS += -I/software/boost/1_72_0/include

WARNINGS = -Wall -pedantic -Woverloaded-virtual -Wfloat-equal -Wshadow \
           -Wredundant-decls -Wunreachable-code -Winline -fmax-errors=1
# WARNINGS += -Weffc++ -Wextra
#             -Wno-pragmas
CXXFLAGS += -std=c++17 -ffast-math -O3 -march=native ${WARNINGS}
#           -ftree-vectorizer-verbose=5 -DNDEBUG
#           -ftree-vectorizer-verbose=2
# CFLAGS = -ffast-math -O3 -DNDEBUG -msse3 -fopenmp -fdump-tree-vect-details
# CFLAGS = -ffast-math -O3 -funroll-loops -DNDEBUG -msse3 -fopenmp -ftree-vectorizer-verbose=2

# info on vectorization
#VECTORIZE = -ftree-vectorize -fdump-tree-vect-blocks=foo.dump
#            -fdump-tree-pre=stderr
VECTORIZE = -ftree-vectorize -fopt-info -ftree-vectorizer-verbose=5
#CXXFLAGS += ${VECTORIZE}
#            -funroll-all-loops -msse3
#GCC: -march=knl -march=broadwell -march=haswell

# For debugging purposes (safe code):
#   -fsanitize=leak      # only one out of the three can be used
#   -fsanitize=address
#   -fsanitize=thread
SANITARY = -fsanitize=address -fsanitize=undefined -fsanitize=null -fsanitize=return \
 -fsanitize=bounds -fsanitize=alignment -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow \
 -fsanitize=bool -fsanitize=enum -fsanitize=vptr
#CXXFLAGS  += ${SANITARY}
#LINKFLAGS += ${SANITARY}

# OpenMP
CXXFLAGS  += -fopenmp
LINKFLAGS += -fopenmp

# Command targets that do not produce a file of their own name.
.PHONY: default clean clean_all run tar zip doc cache mem prof heap codecheck info

default: ${PROGRAM}

${PROGRAM}: ${OBJECTS}
	$(LINKER) $^ ${LINKFLAGS} -o $@
	@echo
	@echo "Start with : $(MPIRUN) -np num_proc $(MPIFLAGS) $(PROGRAM)"
	@echo

clean:
	@rm -f ${PROGRAM} ${OBJECTS} gmon.out

# Double-colon rule: Makefiles including this file may append their own clean_all:: actions.
clean_all:: clean
	@rm -f *_ *~ *.bak *.log *.out *.tar *.orig
	@rm -rf html latex

run: ${PROGRAM}
	${MPIRUN} -np 8 ./$^

# tar the current directory
# $(notdir $(CURDIR)) == base name of the current directory, computed by make
# itself (no shell fork as with `basename ${PWD}`).
MY_DIR = $(notdir $(CURDIR))
tar: clean_all
	@echo "Tar the directory: " ${MY_DIR}
	@cd .. ;\
	tar cf ${MY_DIR}.tar ${MY_DIR} *default.mk ;\
	cd ${MY_DIR}

zip: clean
	@echo "Zip the directory: " ${MY_DIR}
	@cd .. ;\
	zip -r ${MY_DIR}.zip ${MY_DIR} *default.mk ;\
	cd ${MY_DIR}

doc:
	doxygen Doxyfile

#########################################################################
# Pattern rules (preferred over the obsolete suffix rules .cpp.o etc.)
%.o: %.cpp
	$(CXX) -c $(CXXFLAGS) -o $@ $<
#	2>&1 | grep -v openmpi
# special: get rid of compiler warnings generated by openmpi files
#%.o: %.cpp
#	@$(CXX) -c $(CXXFLAGS) $< 2>/tmp/t.txt || grep -sv openmpi /tmp/t.txt
#	|grep -sv openmpi

%.o: %.c
	$(CC) -c $(CFLAGS) -o $@ $<

%.o: %.f
	$(F77) -c $(FFLAGS) -o $@ $<

##################################################################################################
# some tools
# Cache behaviour (CXXFLAGS += -g tracks down to source lines; no -pg in linkflags)
cache: ${PROGRAM}
	valgrind --tool=callgrind --simulate-cache=yes ./$^
#	kcachegrind callgrind.out.<pid> &
	kcachegrind `ls -1tr callgrind.out.* |tail -1`

# Check for wrong memory accesses, memory leaks, ...
# use smaller data sets
# no "-pg" in compile/link options
mem: ${PROGRAM}
	valgrind -v --leak-check=yes --tool=memcheck --undef-value-errors=yes --track-origins=yes --log-file=$^.addr.out --show-reachable=yes mpirun -np 8 ./$^
# Graphical interface:
#	valkyrie

# Simple run time profiling of your code; requires
#   CXXFLAGS  += -g -pg
#   LINKFLAGS += -pg
prof: ${PROGRAM}
	perf record ./$^
	perf report
#	gprof -b ./$^ > gp.out
#	kprof -f gp.out -p gprof &

# Trace your heap:
#   > heaptrack ./main.GCC_
#   > heaptrack_gui heaptrack.main.GCC_.<pid>.gz
heap: ${PROGRAM}
	heaptrack ./$^ 11
	heaptrack_gui `ls -1tr heaptrack.$^.* |tail -1` &

# Static analysis of all sources with cppcheck.
codecheck: $(SOURCES)
	cppcheck --enable=all --inconclusive --std=c++17 --suppress=missingIncludeSystem $^

########################################################################
# get the detailed status of all optimization flags
info:
	echo "detailed status of all optimization flags"
	$(CXX) --version
	$(CXX) -Q $(CXXFLAGS) --help=optimizers

54
ex7/ex7_2/ex7_2/Makefile Executable file
View file

@ -0,0 +1,54 @@
#
# use GNU compiler tools
COMPILER = GCC_
# COMPILER=GCC_SEQ_
# alternatively from the shell:
#   export COMPILER=GCC_
# or, alternatively on the make command line:
#   make COMPILER=GCC_

MAIN    = main
SOURCES = ${MAIN}.cpp greetings.cpp
OBJECTS = $(SOURCES:.cpp=.o)
PROGRAM = ${MAIN}.${COMPILER}

# uncomment the next two lines for debugging and detailed performance analysis
CXXFLAGS += -g
# -DNDEBUG
# -pg slows down the code on my laptop when using CLANG_
LINKFLAGS += -g
#-pg
#CXXFLAGS += -Q --help=optimizers
#CXXFLAGS += -fopt-info

# compiler-specific rules and flags live in the parent directory
include ../${COMPILER}default.mk

#############################################################################
# additional specific cleaning in this directory
clean_all::
	@rm -f t.dat*

#############################################################################
# special testing
# NPROCS = 4
#
TFILE = t.dat
# TTMP = t.tmp
#
# graph is a command, not a file -> always run it
.PHONY: graph
graph: $(PROGRAM)
#	@rm -f $(TFILE).*
# next two lines only sequentially
	./$(PROGRAM)
	@mv $(TFILE).000 $(TFILE)
#	$(MPIRUN) $(MPIFLAGS) -np $(NPROCS) $(PROGRAM)
#	@echo " "; echo "Manipulate data for graphics."; echo " "
#	@cat $(TFILE).* > $(TTMP)
#	@sort -b -k 2 $(TTMP) -o $(TTMP).1
#	@sort -b -k 1 $(TTMP).1 -o $(TTMP).2
#	@awk -f nl.awk $(TTMP).2 > $(TFILE)
#	@rm -f $(TTMP).* $(TTMP) $(TFILE).*
#
	-gnuplot jac.dem

View file

@ -0,0 +1,89 @@
#include "greetings.h"
#include <cassert>
#include <cstring>
#include <iostream>
#include <mpi.h> // MPI
#include <string>
using namespace std;
// see http://www.open-mpi.org/doc/current
// for details on MPI functions
/**
 * Each process sends its host name to root process 0, which prints all
 * names in rank order (deterministic output).
 * Buffer-based variant; the length of each received message is queried
 * via MPI_Get_count.
 *
 * @param icomm  MPI communicator whose processes take part
 */
void greetings(MPI_Comm const &icomm)
{
    int myrank, numprocs;
    MPI_Comm_rank(icomm, &myrank);   // my MPI-rank
    MPI_Comm_size(icomm, &numprocs); // #MPI processes
    // RAII std::string buffers instead of raw new[]/delete[]:
    // memory is released on every exit path and the code is exception safe.
    // Printing via .data() keeps char* semantics (stops at '\0').
    string name (MPI_MAX_PROCESSOR_NAME, '\0'),  // this process' host name
           chbuf(MPI_MAX_PROCESSOR_NAME, '\0');  // receive buffer on root
    int reslen, ierr;
    MPI_Get_processor_name(name.data(), &reslen);
    if (0==myrank) {
        cout << "  " << myrank << " runs on " << name.data() << endl;
        for (int i = 1; i < numprocs; ++i) {
            MPI_Status stat;
            stat.MPI_ERROR = 0; // M U S T be initialized!!
            // receive from rank i with tag i -> messages are printed in rank order
            //ierr = MPI_Recv(chbuf.data(), MPI_MAX_PROCESSOR_NAME, MPI_CHAR, MPI_ANY_SOURCE, MPI_ANY_TAG, icomm, &stat);
            ierr = MPI_Recv(chbuf.data(), MPI_MAX_PROCESSOR_NAME, MPI_CHAR, i, i, icomm, &stat);
            assert(0==ierr);
            cout << "  " << stat.MPI_SOURCE << " runs on " << chbuf.data();
            int count;
            MPI_Get_count(&stat, MPI_CHAR, &count); // size of received data (incl. '\0')
            cout << "   (length: " << count << " )" << endl;
            // stat.Get_error()  // Error code
        }
    }
    else {
        int dest = 0;
        // send the '\0'-terminated name (hence +1) to root, tagged with own rank
        ierr = MPI_Send(name.data(), strlen(name.data()) + 1, MPI_CHAR, dest, myrank, icomm);
        assert(0==ierr);
    }
    return;
}
/**
 * Each process sends its host name to root process 0, which prints all
 * names in rank order (deterministic output).
 * std::string variant of greetings().
 *
 * @param icomm  MPI communicator whose processes take part
 */
void greetings_cpp(MPI_Comm const &icomm)
{
    int rank, nprocs;
    MPI_Comm_rank(icomm, &rank);   // my MPI-rank
    MPI_Comm_size(icomm, &nprocs); // #MPI processes
    string hostname(MPI_MAX_PROCESSOR_NAME, '#'),  // C++
           recvbuf (MPI_MAX_PROCESSOR_NAME, '#');  // C++: receive buffer, don't change size
    int namelen, ierr;
    MPI_Get_processor_name(hostname.data(), &namelen);
    hostname.resize(namelen); // C++: trim to the actual length

    if (0 != rank) {
        // worker: ship the host name to root, tagged with the own rank
        ierr = MPI_Send(hostname.data(), hostname.size(), MPI_CHAR, 0, rank, icomm);
        assert(0==ierr);
        return;
    }

    // root: print the own name first, then the ranks 1..nprocs-1 in order
    cout << "  " << rank << " runs on " << hostname << endl;
    for (int src = 1; src < nprocs; ++src) {
        MPI_Status stat;
        stat.MPI_ERROR = 0; // M U S T be initialized!!
        // receive from rank src with tag src -> deterministic order
        //ierr = MPI_Recv(recvbuf.data(), MPI_MAX_PROCESSOR_NAME, MPI_CHAR, MPI_ANY_SOURCE, MPI_ANY_TAG, icomm, &stat);
        ierr = MPI_Recv(recvbuf.data(), MPI_MAX_PROCESSOR_NAME, MPI_CHAR, src, src, icomm, &stat);
        assert(0==ierr);
        int count;
        MPI_Get_count(&stat, MPI_CHAR, &count); // size of received data
        string const peername(recvbuf, 0, count); // C++
        cout << "  " << stat.MPI_SOURCE << " runs on " << peername;
        cout << "   (length: " << count << " )" << endl;
        // stat.Get_error()  // Error code
    }
    return;
}

View file

@ -0,0 +1,11 @@
// general header for all functions in directory
#ifndef GREETINGS_FILE
#define GREETINGS_FILE
#include <mpi.h>
// Each process sends its host name to rank 0, which prints all names
// (receive in rank order -> deterministic output). Raw-buffer version.
void greetings(MPI_Comm const &icomm);
// Same contract as greetings(), implemented with std::string buffers.
void greetings_cpp(MPI_Comm const &icomm);
#endif

40
ex7/ex7_2/ex7_2/main.cpp Normal file
View file

@ -0,0 +1,40 @@
#include <iostream>
#include <mpi.h>
#include "greetings.h"
using namespace std;
/**
 * MPI demo driver: rank 0 reports the number of running processes,
 * then every process greets via greetings_cpp().
 */
int main(int argc , char **argv )
{
    // -------------------- E2 --------------------
    MPI_Init(&argc, &argv); // Initializes the MPI execution environment
    // -------------------- E1 --------------------
    MPI_Comm const icomm(MPI_COMM_WORLD); // MPI_COMM_WORLD ... all processes
    // -------------------- E3 --------------------
    int rank;
    MPI_Comm_rank(icomm, &rank); // Determines the rank of the calling process in the communicator.
    if (rank == 0)
    {
        int size;
        MPI_Comm_size(icomm, &size); // Returns the size of the group associated with a communicator.
        // fixed typo in the user-visible message: "proesses" -> "processes"
        cout << "Process " << rank << " says: " << size << " processes are running." << endl;
    }
    // To vary the number of processes: change the number in the 'run' target of GCC_default.mk,
    // or alternatively call in the terminal:
    //   /usr/bin/mpirun --oversubscribe -display-map -mca btl ^openib -np 4 ./main.GCC_
    // or
    //   /usr/bin/mpirun --oversubscribe -display-map -mca btl ^openib -np 8 ./main.GCC_
    // -------------------- E4 --------------------
    // use the local communicator consistently (same as MPI_COMM_WORLD, see E1)
    greetings_cpp(icomm); // greetings with sorted output
    MPI_Finalize(); // Terminates MPI execution environment
    return 0;
}