// This is the last version of the code mainmpi.cpp. All rights reserved.
// Copyright (C) 2012 by U. Aldasoro, M.A. Garin, M. Merino and G. Perez,
// from UPV/EHU.
// No part of this code may be reproduced, modified or transmitted, in any
// form or by any means without the prior written permission of the authors.
// Cluster models are in mps format, as external files

#include "itoa.h"
#include <mpi.h>                 // MPI interface (MPI_Init, MPI_Comm, ...)
// ... further standard system headers omitted ...
using namespace std;

#include "ClpSimplex.hpp"
#include "CoinHelperFunctions.hpp"
#include "CoinBuild.hpp"
#include "CoinModel.hpp"
// ... two more system headers omitted ...
#include "OsiClpSolverInterface.hpp"
#include "CbcModel.hpp"

#define rmaxmin (1)      // (1): Maximum; (-1): Minimum
#define nmodel 44        // Number of cluster models
#define useCplex 1       // Select solver. 0: COIN-OR; 1: Cplex
#define threadsCplex 2   // Number of parallel threads used by each MPI thread

#ifdef useCplex
#include "OsiCpxSolverInterface.hpp"
#endif

int main(int argc, char **argv)
{
    // STEP 0 - DECLARING OPTIMIZATION AND MPI VARIABLES
    int imod,i,j,loc,pid,npr,lp,original_rank,original_size,error,ncols;
    int assignment[nmodel],inicial[nmodel],nonzero[nmodel],numres[nmodel],
        numvar[nmodel],numvarint[nmodel],assignmentX0[nmodel],iniX0[nmodel],
        istruef969[nmodel];
    double zq[nmodel];
    MPI_Group orig_group,new_group;    // MPI groups
    MPI_Comm new_comm;                 // MPI communicator

    // STEP 1 - DEFINITION OF THE GLOBAL ENVIRONMENT
    MPI_Init(&argc,&argv);                           // Beginning of the MPI environment
    MPI_Comm_size(MPI_COMM_WORLD, &original_size);   // Total number of threads
    MPI_Comm_rank(MPI_COMM_WORLD, &original_rank);   // Who am I?

    // 1.1 - Creating a new communicator considering active threads
    // 1.1.1 - Basic variables
    int ranks1[nmodel];
    int *ranks2;
    ranks2=new int[original_size];
    for(i=0;i<
    // ... (loop bound and body omitted, together with the remainder of the
    //      reduced-communicator construction and the per-submodel set-up;
    //      the listing resumes inside the per-submodel solver branch that
    //      uses sol0, presumably the CPLEX interface) ...
            if (sol0[loc].getNumCols() > ncols_max_loc) {ncols_max_loc = sol0[loc].getNumCols();}
        }
        else {
            sol1[loc].setObjSense(rmaxmin);
            sol1[loc].readMps(model);                    // Read cluster submodel
            nonzero_loc[loc] = sol1[loc].getNumElements();
            numres_loc[loc]  = sol1[loc].getNumRows();
            numvar_loc[loc]  = sol1[loc].getNumCols();
            for(j=0;j<
            // ... (per-column loop; bound and body omitted) ...
            if (sol1[loc].getNumCols() > ncols_max_loc) {ncols_max_loc = sol1[loc].getNumCols();}
        }
    }

    if (npr > 1) {
        MPI_Allreduce(&ncols_max_loc,&ncols, 1, MPI_INT, MPI_MAX,new_comm);
    } else {
        ncols=ncols_max_loc;
    }

    double **x0;
    x0=new double*[nmodel];
    for(i=0;i<
    // ... (allocation of the x0 rows and the code that fills zq_loc and
    //      istruef969_loc for the locally assigned submodels is omitted) ...

    if (npr > 1) {
        MPI_Allgatherv(&istruef969_loc[0], assignment[pid], MPI_INT,
                       &istruef969[0], assignment, inicial, MPI_INT, new_comm);
        MPI_Gatherv(&zq_loc[0], assignment[pid], MPI_DOUBLE,
                    &zq[0], assignment, inicial, MPI_DOUBLE, 0, new_comm);   // zq holds doubles
    } else {
        for(imod=0;imod<
        // ... (rest of the file not shown) ...
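
// ===========================================================================
// Illustrative sketch (not part of mainmpi.cpp): one way to build the reduced
// communicator described in step 1.1 above, keeping only the ranks that will
// receive cluster submodels.  The original implementation of that step is not
// shown, so details such as the name "active" and the rank selection rule are
// assumptions; only the MPI_Comm_group / MPI_Group_incl / MPI_Comm_create
// pattern itself is standard MPI.
// ===========================================================================
#include <mpi.h>
#include <cstdio>
#include <algorithm>

int main(int argc, char **argv)
{
    const int nmodel = 44;                 // same meaning as in mainmpi.cpp
    int world_rank, world_size;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    // At most nmodel ranks do useful work: list their world ranks (assumed
    // here to be the first ones).
    int active = std::min(nmodel, world_size);
    int ranks1[nmodel];
    for (int i = 0; i < active; i++) ranks1[i] = i;

    // Extract the group of MPI_COMM_WORLD, keep only the active ranks,
    // and create a communicator over that reduced group.
    MPI_Group orig_group, new_group;
    MPI_Comm  new_comm;
    MPI_Comm_group(MPI_COMM_WORLD, &orig_group);
    MPI_Group_incl(orig_group, active, ranks1, &new_group);
    MPI_Comm_create(MPI_COMM_WORLD, new_group, &new_comm);

    if (new_comm != MPI_COMM_NULL) {       // excluded ranks get MPI_COMM_NULL
        int pid, npr;
        MPI_Comm_rank(new_comm, &pid);
        MPI_Comm_size(new_comm, &npr);
        printf("world rank %d -> active rank %d of %d\n", world_rank, pid, npr);
        MPI_Comm_free(&new_comm);
    }
    MPI_Group_free(&new_group);
    MPI_Group_free(&orig_group);
    MPI_Finalize();
    return 0;
}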
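
// ===========================================================================
// Illustrative sketch (not part of mainmpi.cpp): reading one cluster submodel
// in MPS format with OsiClpSolverInterface and collecting the same per-model
// statistics (nonzeros, rows, columns, integer columns) that the COIN-OR
// branch above stores in nonzero_loc / numres_loc / numvar_loc.  The file
// name "cluster01" is only an example, and this sketch uses the Clp interface
// only, whereas the original also supports CPLEX via useCplex.
// ===========================================================================
#include <cstdio>
#include "OsiClpSolverInterface.hpp"

int main()
{
    OsiClpSolverInterface solver;

    // readMps returns the number of errors found while parsing the file.
    if (solver.readMps("cluster01", "mps") != 0) {
        fprintf(stderr, "could not read cluster01.mps\n");
        return 1;
    }
    solver.setObjSense(-1.0);          // OSI convention: -1 maximize, +1 minimize

    int nonzero   = solver.getNumElements();
    int numres    = solver.getNumRows();
    int numvar    = solver.getNumCols();
    int numvarint = 0;
    for (int j = 0; j < numvar; j++)
        if (solver.isInteger(j)) numvarint++;

    printf("rows=%d cols=%d (int=%d) nonzeros=%d\n",
           numres, numvar, numvarint, nonzero);

    // Solve the LP relaxation and report the objective value.
    solver.initialSolve();
    if (solver.isProvenOptimal())
        printf("LP objective: %g\n", solver.getObjValue());
    return 0;
}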
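
// ===========================================================================
// Illustrative sketch (not part of mainmpi.cpp): combining per-rank results
// with MPI_Allgatherv / MPI_Gatherv using per-rank counts ("assignment") and
// displacements ("inicial"), as in the final step above.  The names
// assignment, inicial and zq come from the original; "flag" stands in for
// istruef969, and the dummy objective values are invented for the demo.
// ===========================================================================
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv)
{
    const int nmodel = 8;                   // small value just for the demo
    int pid, npr;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &pid);
    MPI_Comm_size(MPI_COMM_WORLD, &npr);

    // assignment[p] = number of models owned by rank p,
    // inicial[p]    = index of its first model in the global arrays.
    std::vector<int> assignment(npr), inicial(npr);
    for (int p = 0, start = 0; p < npr; p++) {
        assignment[p] = nmodel / npr + (p < nmodel % npr ? 1 : 0);
        inicial[p]    = start;
        start        += assignment[p];
    }

    // Local results: one flag and one objective value per owned model.
    std::vector<int>    flag_loc(assignment[pid] > 0 ? assignment[pid] : 1);
    std::vector<double> zq_loc(flag_loc.size());
    for (int k = 0; k < assignment[pid]; k++) {
        flag_loc[k] = 1;
        zq_loc[k]   = 100.0 * pid + k;      // dummy objective value
    }

    // Every rank needs the flags; only rank 0 needs the objective values.
    std::vector<int>    flag(nmodel);
    std::vector<double> zq(nmodel);
    MPI_Allgatherv(flag_loc.data(), assignment[pid], MPI_INT,
                   flag.data(), assignment.data(), inicial.data(), MPI_INT,
                   MPI_COMM_WORLD);
    MPI_Gatherv(zq_loc.data(), assignment[pid], MPI_DOUBLE,
                zq.data(), assignment.data(), inicial.data(), MPI_DOUBLE,
                0, MPI_COMM_WORLD);

    if (pid == 0)
        for (int i = 0; i < nmodel; i++)
            printf("model %d: flag=%d zq=%g\n", i, flag[i], zq[i]);

    MPI_Finalize();
    return 0;
}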