Quantcast
Channel: Intel® Software - Intel® oneAPI Math Kernel Library & Intel® Math Kernel Library
Viewing all articles
Browse latest Browse all 3005

Error in diagonalization (Siesta) with >=2018.2.046

$
0
0

I am trying out the latest MKL version with Siesta (4.0.2). However, I keep on getting

{    0,    0}:  On entry to 
PDSTEDC parameter number   10 had an illegal value 
....

for 2018.2.046 and 2018.3.051. I am compiling and running on the same node in a for loop:

# Rebuild Siesta against each Intel compiler/MKL/MPI release in turn and
# run the si64 regression test, tagging each result with the version label.
for version in 2017.7.065 2018.0.033 2018.1.038 2018.2.046 2018.3.051; do
    # Start from a clean environment so releases cannot cross-contaminate.
    module purge
    module load "intel/$version" "intel/$version.mkl" "intel/$version.mpi"
    make clean
    make
    cd Tests/si64
    make SIESTA="mpiexec -np $PBS_NP ../../../siesta" label="$version"
    cd ../..
done

Note that only the last two are failing with the above error message.
The arch.make file is this:

# Siesta arch.make for Intel MPI + MKL (LP64 interface throughout).
SIESTA_ARCH=intel-mpi
FC=mpiifort

# Main compile flags. -fp-model precise/-prec-sqrt/-prec-div keep floating
# point reproducible; -heap-arrays 1024 moves large automatic arrays off the
# stack; -i4/-real-size 32/-double-size 64 pin the default kind sizes.
FFLAGS = -O2 -xHost -fp-model precise -prec-sqrt -prec-div \
     -fimf-precision=high -ip -mp1 -fpp -heap-arrays 1024 -i4 \
   -double-size 64 -real-size 32 \
     -warn unused,truncated_source,uncalled,declarations,usage
# Reduced optimization (-O1) for the files singled out by the explicit
# rules at the bottom of this file (atom.o, state_analysis.o).
FFLAGS_SPECIAL= -O1 -xHost -ip -mp1 -fpp -heap-arrays 1024 -i4 \
    -double-size 64 -real-size 32 -fp-model precise \
        -warn unused,truncated_source,uncalled,declarations,usage

DUMMY_FOX=--enable-dummy
FFLAGS_DEBUG=-g -O0 -debug full -traceback -C
LDFLAGS= -static-intel -static-libgcc
RANLIB=ranlib
FC_SERIAL=ifort
#
# NetCDF support disabled (empty on purpose).
NETCDF_LIBS=
NETCDF_INTERFACE=
FPPFLAGS_CDF=

MPI_INTERFACE=libmpi_f90.a
MKL_INCLUDE=-I$(MKLROOT)/include/intel64/lp64
MPI_LIBS=-L$(I_MPI_ROOT)/intel64/lib -lmpi
# NOTE(review): bare path, no -L prefix, and not referenced by LIBS below
# (which spells the library paths out in full) — presumably kept for
# reference; confirm before relying on it.
MKL_LIBS=$(MKLROOT)/lib/intel64
MPI_INCLUDE=-I$(I_MPI_ROOT)/intel64/include
INCFLAGS=$(MPI_INCLUDE) $(MKL_INCLUDE)
#
# -DSIESTA__NO_MRRR avoids the MRRR eigensolver path in Siesta's diagonalizer.
FPPFLAGS_MPI=-DMPI -DMPI_TIMING -DFC_HAVE_FLUSH -DFC_HAVE_ABORT -DSIESTA__NO_MRRR
##
# Duplicates of the (empty) assignments above; harmless, kept as-is.
NETCDF_LIBS=
NETCDF_INTERFACE=
##
# FIX: was $(MPI_LIB) — an undefined variable (the definition above is
# MPI_LIBS), so the MPI link flags silently expanded to nothing. mpiifort
# links MPI implicitly, which masked the typo.
# NOTE(review): -mkl=parallel together with the static libmkl_sequential.a
# in the group below mixes threaded and sequential MKL in one link — pick
# one threading layer (e.g. drop -mkl=parallel, or swap sequential for
# libmkl_intel_thread.a + -liomp5); confirm which is intended.
LIBS=   $(MPI_LIBS) -mkl=parallel \
  ${MKLROOT}/lib/intel64/libmkl_blas95_lp64.a \
  ${MKLROOT}/lib/intel64/libmkl_scalapack_lp64.a \
  -Wl,--start-group \
  ${MKLROOT}/lib/intel64/libmkl_intel_lp64.a \
  ${MKLROOT}/lib/intel64/libmkl_sequential.a \
  ${MKLROOT}/lib/intel64/libmkl_core.a \
  ${MKLROOT}/lib/intel64/libmkl_blacs_intelmpi_lp64.a \
  -Wl,--end-group -lstdc++ -lpthread -lm -ldl
#
SYS=nag
FPPFLAGS= $(FPPFLAGS_CDF) $(FPPFLAGS_MPI)
##
# Per-file overrides: these two files are built at -O1 (FFLAGS_SPECIAL).
# Recipe lines below MUST begin with a hard TAB, not spaces.
atom.o: atom.F
	$(FC) -c $(FFLAGS_SPECIAL) $(INCFLAGS) $(FPPFLAGS) $(FPPFLAGS_fixed_F) $<
state_analysis.o: state_analysis.F
	$(FC) -c $(FFLAGS_SPECIAL) $(INCFLAGS) $(FPPFLAGS) $(FPPFLAGS_fixed_F) $<
# Default suffix rules: preprocessed (.F/.F90) sources get FPPFLAGS,
# plain (.f/.f90) sources do not.
.F.o:
	$(FC) -c $(FFLAGS) $(INCFLAGS) $(FPPFLAGS) $<
.f.o:
	$(FC) -c $(FFLAGS) $(INCFLAGS) $<
.F90.o:
	$(FC) -c $(FFLAGS) $(INCFLAGS) $(FPPFLAGS) $<
.f90.o:
	$(FC) -c $(FFLAGS) $(INCFLAGS) $<

I am running the test with 8 cores, and the architecture is: Xeon E5-2665.

Thanks!


Viewing all articles
Browse latest Browse all 3005

Trending Articles



<script src="https://jsc.adskeeper.com/r/s/rssing.com.1596347.js" async> </script>