#pragma once

#include <string>
#include <optional>
#include <variant>
#include <filesystem>
#include <fstream>
#include <algorithm>
#include <numeric>
#include <cmath>

#include <Eigen/Dense>
#include "nlohmann/json.hpp"

#include "teqp/types.hpp"
#include "MultiComplex/MultiComplex.hpp"
#include "multifluid_eosterms.hpp"
// See https://eigen.tuxfamily.org/dox/TopicCustomizing_CustomScalar.html
namespace Eigen {
template<typename TN> struct NumTraits<mcx::MultiComplex<TN>> : NumTraits<double> // permits to get the epsilon, dummy_precision, lowest, highest functions
{
enum {
IsComplex = 1,
IsInteger = 0,
IsSigned = 1,
RequireInitialization = 1,
ReadCost = 1,
AddCost = 3,
MulCost = 3
};
};
}
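/**
The corresponding-states contribution to the residual Helmholtz energy of the mixture:
the mole-fraction-weighted sum of the pure-fluid residual terms, all evaluated at the
mixture reduced variables: alphar_CS = sum_i x_i*alphar_oi(tau, delta)
*/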
template<typename EOSCollection>
class CorrespondingStatesContribution {
private:
const EOSCollection EOSs;
public:
CorrespondingStatesContribution(EOSCollection&& EOSs) : EOSs(EOSs) {};
template<typename TauType, typename DeltaType, typename MoleFractions>
auto alphar(const TauType& tau, const DeltaType& delta, const MoleFractions& molefracs) const {
using resulttype = std::common_type_t<decltype(tau), decltype(molefracs[0]), decltype(delta)>; // Type promotion, without the const-ness
resulttype alphar = 0.0;
auto N = molefracs.size();
for (auto i = 0; i < N; ++i) {
alphar = alphar + molefracs[i] * EOSs[i].alphar(tau, delta);
        }
        return forceeval(alphar);
    }
template<typename TauType, typename DeltaType>
auto alphari(const TauType& tau, const DeltaType& delta, std::size_t i) const {
using resulttype = std::common_type_t<decltype(tau), decltype(delta)>; // Type promotion, without the const-ness
return EOSs[i].alphar(tau, delta);
}
auto get_EOS(std::size_t i) const{
return EOSs[i];
}
};
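/**
The departure-function contribution to the residual Helmholtz energy of the mixture:
alphar_dep = sum_i sum_{j>i} x_i*x_j*F_{ij}*alphar_{ij}(tau, delta)
*/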
template<typename FCollection, typename DepartureFunctionCollection>
class DepartureContribution {
private:
const FCollection F;
const DepartureFunctionCollection funcs;
public:
DepartureContribution(FCollection&& F, DepartureFunctionCollection&& funcs) : F(F), funcs(funcs) {};
template<typename TauType, typename DeltaType, typename MoleFractions>
auto alphar(const TauType& tau, const DeltaType& delta, const MoleFractions& molefracs) const {
using resulttype = std::common_type_t<decltype(tau), decltype(molefracs[0]), decltype(delta)>; // Type promotion, without the const-ness
resulttype alphar = 0.0;
auto N = molefracs.size();
for (auto i = 0; i < N; ++i) {
for (auto j = i+1; j < N; ++j) {
alphar = alphar + molefracs[i] * molefracs[j] * F(i, j) * funcs[i][j].alphar(tau, delta);
}
}
        return forceeval(alphar);
    }
};
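/**
The complete multifluid model: the reducing function provides Tr(x) and rhor(x), and the residual
Helmholtz energy is the sum of the corresponding-states and departure contributions evaluated at
tau = Tr(x)/T and delta = rho/rhor(x)
*/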
template<typename ReducingFunction, typename CorrespondingTerm, typename DepartureTerm>
class MultiFluid {
private:
    std::string meta = ""; ///< A string that can be used to store arbitrary metadata as needed
public:
    const ReducingFunction redfunc;
const CorrespondingTerm corr;
const DepartureTerm dep;
template<class VecType>
auto R(const VecType& molefrac) const {
return get_R_gas<decltype(molefrac[0])>();
}
/// Store some sort of metadata in string form (perhaps a JSON representation of the model?)
void set_meta(const std::string& m) { meta = m; }
/// Get the metadata stored in string form
auto get_meta() const { return meta; }
MultiFluid(ReducingFunction&& redfunc, CorrespondingTerm&& corr, DepartureTerm&& dep) : redfunc(redfunc), corr(corr), dep(dep) {};
template<typename TType, typename RhoType>
auto alphar(TType T,
const RhoType& rhovec,
const std::optional<typename RhoType::value_type> rhotot = std::nullopt) const
{
typename RhoType::value_type rhotot_ = (rhotot.has_value()) ? rhotot.value() : std::accumulate(std::begin(rhovec), std::end(rhovec), (decltype(rhovec[0]))0.0);
auto molefrac = rhovec / rhotot_;
return alphar(T, rhotot_, molefrac);
}
template<typename TType, typename RhoType, typename MoleFracType>
auto alphar(const TType &T,
const RhoType &rho,
const MoleFracType& molefrac) const
{
auto Tred = forceeval(redfunc.get_Tr(molefrac));
auto rhored = forceeval(redfunc.get_rhor(molefrac));
auto delta = forceeval(rho / rhored);
auto tau = forceeval(Tred / T);
auto val = corr.alphar(tau, delta, molefrac) + dep.alphar(tau, delta, molefrac);
return forceeval(val);
}
};
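/**
The GERG-style reducing function: the reducing temperature Tr(x) and reducing molar volume
vr(x) = 1/rhor(x) are quadratic mixtures of the pure-fluid critical parameters, with binary
interaction parameters beta and gamma
*/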
class MultiFluidReducingFunction {
private:
    Eigen::MatrixXd YT, Yv;
    template <typename Num> auto cube(Num x) const { return x * x * x; }
    template <typename Num> auto square(Num x) const { return x * x; }
public:
    const Eigen::MatrixXd betaT, gammaT, betaV, gammaV;
    const Eigen::ArrayXd Tc, vc;
template<typename ArrayLike>
MultiFluidReducingFunction(
const Eigen::MatrixXd& betaT, const Eigen::MatrixXd& gammaT,
const Eigen::MatrixXd& betaV, const Eigen::MatrixXd& gammaV,
const ArrayLike& Tc, const ArrayLike& vc)
: betaT(betaT), gammaT(gammaT), betaV(betaV), gammaV(gammaV), Tc(Tc), vc(vc) {
auto N = Tc.size();
YT.resize(N, N); YT.setZero();
Yv.resize(N, N); Yv.setZero();
for (auto i = 0; i < N; ++i) {
for (auto j = i + 1; j < N; ++j) {
YT(i, j) = betaT(i, j) * gammaT(i, j) * sqrt(Tc[i] * Tc[j]);
YT(j, i) = betaT(j, i) * gammaT(j, i) * sqrt(Tc[i] * Tc[j]);
Yv(i, j) = 1.0 / 8.0 * betaV(i, j) * gammaV(i, j) * cube(cbrt(vc[i]) + cbrt(vc[j]));
Yv(j, i) = 1.0 / 8.0 * betaV(j, i) * gammaV(j, i) * cube(cbrt(vc[i]) + cbrt(vc[j]));
}
}
}
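    /**
    Evaluate a reducing parameter (reducing temperature or reducing molar volume) with the
    GERG-2004 quadratic mixing rule; Yc holds the pure-fluid critical parameters and Yij the
    cross terms precalculated from beta and gamma in the constructor
    */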
template <typename MoleFractions>
auto Y(const MoleFractions& z, const Eigen::ArrayXd& Yc, const Eigen::MatrixXd& beta, const Eigen::MatrixXd& Yij) const {
        auto N = z.size();
        typename MoleFractions::value_type sum1 = 0.0, sum2 = 0.0;
        for (auto i = 0; i < N; ++i) {
            sum1 = sum1 + square(z[i]) * Yc[i];
        }
        for (auto i = 0; i < N-1; ++i) {
            for (auto j = i+1; j < N; ++j) {
                sum2 = sum2 + 2.0*z[i]*z[j]*(z[i] + z[j])/(square(beta(i, j))*z[i] + z[j])*Yij(i, j);
            }
        }
        return sum1 + sum2;
    }
static auto get_BIPdep(const nlohmann::json& collection, const std::vector<std::string>& identifiers, const nlohmann::json& flags) {
if (flags.contains("estimate")) {
std::string scheme = flags["estimate"];
if (scheme == "Lorentz-Berthelot") {
return std::make_tuple(nlohmann::json({
{"betaT", 1.0}, {"gammaT", 1.0}, {"betaV", 1.0}, {"gammaV", 1.0}, {"F", 0.0}
            }), false);
        }
else {
throw std::invalid_argument("estimation scheme is not understood:" + scheme);
}
        }
// convert string to upper case
auto toupper = [](const std::string s){ auto data = s; std::for_each(data.begin(), data.end(), [](char& c) { c = ::toupper(c); }); return data;};
// First pass, check names
std::string comp0 = toupper(identifiers[0]);
std::string comp1 = toupper(identifiers[1]);
        for (auto& el : collection) {
std::string name1 = toupper(el["Name1"]);
std::string name2 = toupper(el["Name2"]);
if (comp0 == name1 && comp1 == name2) {
                return std::make_tuple(el, false);
            }
if (comp0 == name2 && comp1 == name1) {
                return std::make_tuple(el, true);
            }
        }
throw std::invalid_argument("Can't match this binary pair");
static auto get_binary_interaction_double(const nlohmann::json& collection, const std::vector<std::string>& components, const nlohmann::json& flags) {
auto [el, swap_needed] = get_BIPdep(collection, components, flags);
double betaT = el["betaT"], gammaT = el["gammaT"], betaV = el["betaV"], gammaV = el["gammaV"];
        if (swap_needed) {
            // Backwards order of components, flip beta values
betaT = 1.0 / betaT;
betaV = 1.0 / betaV;
}
return std::make_tuple(betaT, gammaT, betaV, gammaV);
}
static auto get_BIP_matrices(const nlohmann::json& collection, const std::vector<std::string>& components, const nlohmann::json& flags) {
Eigen::MatrixXd betaT, gammaT, betaV, gammaV, YT, Yv;
auto N = components.size();
betaT.resize(N, N); betaT.setZero();
gammaT.resize(N, N); gammaT.setZero();
betaV.resize(N, N); betaV.setZero();
gammaV.resize(N, N); gammaV.setZero();
for (auto i = 0; i < N; ++i) {
for (auto j = i + 1; j < N; ++j) {
auto [betaT_, gammaT_, betaV_, gammaV_] = get_binary_interaction_double(collection, { components[i], components[j] }, flags);
betaT(i, j) = betaT_; betaT(j, i) = 1.0 / betaT(i, j);
gammaT(i, j) = gammaT_; gammaT(j, i) = gammaT(i, j);
betaV(i, j) = betaV_; betaV(j, i) = 1.0 / betaV(i, j);
gammaV(i, j) = gammaV_; gammaV(j, i) = gammaV(i, j);
}
}
return std::make_tuple(betaT, gammaT, betaV, gammaV);
}
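    /// Collect the reducing temperatures and reducing molar volumes (1/rho_reducing) of the pure fluids from their JSON structures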
    static auto get_Tcvc(const std::vector<nlohmann::json>& pureJSON) {
        Eigen::ArrayXd Tc(pureJSON.size()), vc(pureJSON.size());
        for (auto i = 0U; i < pureJSON.size(); ++i) {
            auto red = pureJSON[i].at("EOS")[0].at("STATES").at("reducing");
            double Tc_ = red.at("T"), rhoc_ = red.at("rhomolar");
            Tc[i] = Tc_; vc[i] = 1.0 / rhoc_;
        }
        return std::make_tuple(Tc, vc);
    }
static auto get_F_matrix(const nlohmann::json& collection, const std::vector<std::string>& identifiers, const nlohmann::json& flags) {
auto N = identifiers.size();
Eigen::MatrixXd F(N, N);
for (auto i = 0; i < N; ++i) {
F(i, i) = 0.0;
for (auto j = i + 1; j < N; ++j) {
auto [el, swap_needed] = get_BIPdep(collection, { identifiers[i], identifiers[j] }, flags);
if (el.empty()) {
F(i, j) = 0.0;
F(j, i) = 0.0;
}
else{
F(i, j) = el["F"];
F(j, i) = el["F"];
                }
            }
        }
        return F;
    }
template<typename MoleFractions> auto get_Tr(const MoleFractions& molefracs) const { return Y(molefracs, Tc, betaT, YT); }
template<typename MoleFractions> auto get_rhor(const MoleFractions& molefracs) const { return 1.0 / Y(molefracs, vc, betaV, Yv); }
};
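/**
An alternative reducing function in which the composition dependence is written in the
"invariant" form Y = sum_i sum_j z_i*z_j*(phi_ij + z_j*lambda_ij)*Y_ij, parameterized by
phi and lambda rather than the GERG-style beta and gamma
*/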
class MultiFluidInvariantReducingFunction {
private:
Eigen::MatrixXd YT, Yv;
template <typename Num> auto cube(Num x) const { return x * x * x; }
template <typename Num> auto square(Num x) const { return x * x; }
public:
const Eigen::MatrixXd phiT, lambdaT, phiV, lambdaV;
const Eigen::ArrayXd Tc, vc;
template<typename ArrayLike>
MultiFluidInvariantReducingFunction(
const Eigen::MatrixXd& phiT, const Eigen::MatrixXd& lambdaT,
const Eigen::MatrixXd& phiV, const Eigen::MatrixXd& lambdaV,
const ArrayLike& Tc, const ArrayLike& vc)
: phiT(phiT), lambdaT(lambdaT), phiV(phiV), lambdaV(lambdaV), Tc(Tc), vc(vc) {
auto N = Tc.size();
YT.resize(N, N); YT.setZero();
Yv.resize(N, N); Yv.setZero();
for (auto i = 0; i < N; ++i) {
for (auto j = 0; j < N; ++j) {
YT(i, j) = sqrt(Tc[i] * Tc[j]);
YT(j, i) = sqrt(Tc[i] * Tc[j]);
Yv(i, j) = 1.0 / 8.0 * cube(cbrt(vc[i]) + cbrt(vc[j]));
Yv(j, i) = 1.0 / 8.0 * cube(cbrt(vc[i]) + cbrt(vc[j]));
}
}
}
template <typename MoleFractions>
auto Y(const MoleFractions& z, const Eigen::MatrixXd& phi, const Eigen::MatrixXd& lambda, const Eigen::MatrixXd& Yij) const {
auto N = z.size();
typename MoleFractions::value_type sum = 0.0;
for (auto i = 0; i < N; ++i) {
for (auto j = 0; j < N; ++j) {
auto contrib = z[i] * z[j] * (phi(i, j) + z[j] * lambda(i, j)) * Yij(i, j);
sum += contrib;
}
}
return sum;
}
template<typename MoleFractions> auto get_Tr(const MoleFractions& molefracs) const { return Y(molefracs, phiT, lambdaT, YT); }
template<typename MoleFractions> auto get_rhor(const MoleFractions& molefracs) const { return 1.0 / Y(molefracs, phiV, lambdaV, Yv); }
};
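/// Build the collection of departure-function terms from a JSON node in the CoolProp
/// mixture-departure-function format (fields "type", "n", "t", "d", "l", "eta", ...)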
inline auto build_departure_function(const nlohmann::json& j) {
auto build_power = [&](auto term, auto& dep) {
std::size_t N = term["n"].size();
PowerEOSTerm eos;
auto eigorzero = [&term, &N](const std::string& name) -> Eigen::ArrayXd {
if (!term[name].empty()) {
return toeig(term[name]);
}
else {
return Eigen::ArrayXd::Zero(N);
}
};
eos.n = eigorzero("n");
eos.t = eigorzero("t");
eos.d = eigorzero("d");
Eigen::ArrayXd c(N), l(N); c.setZero();
int Nlzero = 0, Nlnonzero = 0;
        bool contiguous_lzero = false;
if (term["l"].empty()) {
// exponential part not included
l.setZero();
if (!all_same_length(term, { "n","t","d" })) {
                throw std::invalid_argument("Lengths are not all identical in polynomial-like term");
            }
        }
        else {
if (!all_same_length(term, { "n","t","d","l"})) {
throw std::invalid_argument("Lengths are not all identical in exponential term");
}
l = toeig(term["l"]);
// l is included, use it to build c; c_i = 1 if l_i > 0, zero otherwise
for (auto i = 0; i < c.size(); ++i) {
if (l[i] > 0) {
c[i] = 1.0;
}
}
// See how many of the first entries have zero values for l_i
contiguous_lzero = (l[0] == 0);
for (auto i = 0; i < c.size(); ++i) {
if (l[i] == 0) {
Nlzero++;
}
}
}
Nlnonzero = l.size() - Nlzero;
if ((l[0] != 0) && (l[l.size() - 1] == 0)) {
throw std::invalid_argument("If l_i has zero and non-zero values, the zero values need to come first");
}
        eos.c = c;
        eos.l = l;
        eos.l_i = eos.l.cast<int>();
if (Nlzero + Nlnonzero != l.size()) {
throw std::invalid_argument("Somehow the l lengths don't add up");
}
if (((eos.l_i.cast<double>() - eos.l).cwiseAbs() > 0.0).any()) {
throw std::invalid_argument("Non-integer entry in l found");
}
// If a contiguous portion of the terms have values of l_i that are zero
// it is computationally advantageous to break up the evaluation into
// part that has just the n_i*tau^t_i*delta^d_i and the part with the
// exponential term exp(-delta^l_i)
if (l.sum() == 0) {
// No l term at all, just polynomial
JustPowerEOSTerm poly;
poly.n = eos.n;
poly.t = eos.t;
poly.d = eos.d;
dep.add_term(poly);
}
else if (l.sum() > 0 && contiguous_lzero){
JustPowerEOSTerm poly;
poly.n = eos.n.head(Nlzero);
poly.t = eos.t.head(Nlzero);
poly.d = eos.d.head(Nlzero);
dep.add_term(poly);
PowerEOSTerm e;
e.n = eos.n.tail(Nlnonzero);
e.t = eos.t.tail(Nlnonzero);
e.d = eos.d.tail(Nlnonzero);
e.c = eos.c.tail(Nlnonzero);
            e.l = eos.l.tail(Nlnonzero);
            e.l_i = e.l.cast<int>();
            dep.add_term(e);
}
else {
// Don't try to get too clever, just add the departure term
dep.add_term(eos);
}
    };
auto build_gaussian = [&](auto& term) {
GaussianEOSTerm eos;
eos.n = toeig(term["n"]);
eos.t = toeig(term["t"]);
eos.d = toeig(term["d"]);
eos.eta = toeig(term["eta"]);
eos.beta = toeig(term["beta"]);
eos.gamma = toeig(term["gamma"]);
eos.epsilon = toeig(term["epsilon"]);
if (!all_same_length(term, { "n","t","d","eta","beta","gamma","epsilon" })) {
throw std::invalid_argument("Lengths are not all identical in Gaussian term");
        }
        return eos;
    };
auto build_GERG2004 = [&](const auto& term, auto& dep) {
if (!all_same_length(term, { "n","t","d","eta","beta","gamma","epsilon" })) {
throw std::invalid_argument("Lengths are not all identical in GERG term");
int Npower = term["Npower"];
auto NGERG = static_cast<int>(term["n"].size()) - Npower;
PowerEOSTerm eos;
eos.n = toeig(term["n"]).head(Npower);
eos.t = toeig(term["t"]).head(Npower);
eos.d = toeig(term["d"]).head(Npower);
if (term.contains("l")) {
eos.l = toeig(term["l"]).head(Npower);
}
else {
eos.l = 0.0 * eos.n;
}
eos.c = (eos.l > 0).cast<int>().cast<double>();
eos.l_i = eos.l.cast<int>();
dep.add_term(eos);
GERG2004EOSTerm e;
e.n = toeig(term["n"]).tail(NGERG);
e.t = toeig(term["t"]).tail(NGERG);
e.d = toeig(term["d"]).tail(NGERG);
e.eta = toeig(term["eta"]).tail(NGERG);
e.beta = toeig(term["beta"]).tail(NGERG);
e.gamma = toeig(term["gamma"]).tail(NGERG);
e.epsilon = toeig(term["epsilon"]).tail(NGERG);
dep.add_term(e);
};
auto build_GaussianExponential = [&](const auto& term, auto& dep) {
if (!all_same_length(term, { "n","t","d","eta","beta","gamma","epsilon" })) {
throw std::invalid_argument("Lengths are not all identical in Gaussian+Exponential term");
}
int Npower = term["Npower"];
auto NGauss = static_cast<int>(term["n"].size()) - Npower;
PowerEOSTerm eos;
eos.n = toeig(term["n"]).head(Npower);
eos.t = toeig(term["t"]).head(Npower);
eos.d = toeig(term["d"]).head(Npower);
if (term.contains("l")) {
eos.l = toeig(term["l"]).head(Npower);
}
else {
eos.l = 0.0 * eos.n;
}
eos.c = (eos.l > 0).cast<int>().cast<double>();
eos.l_i = eos.l.cast<int>();
dep.add_term(eos);
GaussianEOSTerm e;
e.n = toeig(term["n"]).tail(NGauss);
e.t = toeig(term["t"]).tail(NGauss);
e.d = toeig(term["d"]).tail(NGauss);
e.eta = toeig(term["eta"]).tail(NGauss);
e.beta = toeig(term["beta"]).tail(NGauss);
e.gamma = toeig(term["gamma"]).tail(NGauss);
e.epsilon = toeig(term["epsilon"]).tail(NGauss);
dep.add_term(e);
};
    std::string type = j.at("type");
    DepartureTerms dep;
    if (type == "Exponential") {
        build_power(j, dep);
}
else if (type == "GERG-2004" || type == "GERG-2008") {
build_GERG2004(j, dep);
}
else if (type == "Gaussian+Exponential") {
build_GaussianExponential(j, dep);
}
else if (type == "none") {
dep.add_term(NullEOSTerm());
}
else {
throw std::invalid_argument("Bad departure term type: " + type);
}
return dep;
}
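/// Build the square matrix of departure functions for all binary pairs, loading the definitions
/// from <coolprop_root>/dev/mixtures/mixture_departure_functions.json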
inline auto get_departure_function_matrix(const std::string& coolprop_root, const nlohmann::json& BIPcollection, const std::vector<std::string>& components, const nlohmann::json& flags) {
// Allocate the matrix with default models
std::vector<std::vector<DepartureTerms>> funcs(components.size()); for (auto i = 0; i < funcs.size(); ++i) { funcs[i].resize(funcs.size()); }
// Load the collection of data on departure functions
auto depcollection = nlohmann::json::parse(std::ifstream(coolprop_root + "/dev/mixtures/mixture_departure_functions.json"));
auto get_departure_json = [&depcollection](const std::string& Name) {
for (auto& el : depcollection) {
if (el["Name"] == Name) { return el; }
throw std::invalid_argument("Bad argument");
Ian Bell
committed
};
for (auto i = 0; i < funcs.size(); ++i) {
for (auto j = i + 1; j < funcs.size(); ++j) {
auto [BIP, swap_needed] = MultiFluidReducingFunction::get_BIPdep(BIPcollection, { components[i], components[j] }, flags);
            std::string funcname = BIP.contains("function") ? BIP["function"] : "";
            if (!funcname.empty()) {
                auto jj = get_departure_json(funcname);
                funcs[i][j] = build_departure_function(jj);
                funcs[j][i] = build_departure_function(jj);
            }
            else {
                funcs[i][j].add_term(NullEOSTerm());
                funcs[j][i].add_term(NullEOSTerm());
            }
        }
    }
return funcs;
}
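/// Build the collection of residual Helmholtz energy terms of a pure fluid from the "alphar"
/// block of its CoolProp-format JSON structure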
inline auto get_EOS_terms(const nlohmann::json& j)
{
auto alphar = j["EOS"][0]["alphar"];
const std::vector<std::string> allowed_types = { "ResidualHelmholtzPower", "ResidualHelmholtzGaussian", "ResidualHelmholtzNonAnalytic","ResidualHelmholtzGaoB", "ResidualHelmholtzLemmon2005", "ResidualHelmholtzExponential" };
auto isallowed = [&](const auto& conventional_types, const std::string& name) {
for (auto& a : conventional_types) { if (name == a) { return true; }; } return false;
    };
for (auto& term : alphar) {
std::string type = term["type"];
if (!isallowed(allowed_types, type)) {
std::string a = allowed_types[0]; for (auto i = 1; i < allowed_types.size(); ++i) { a += "," + allowed_types[i]; }
throw std::invalid_argument("Bad type:" + type + "; allowed types are: {" + a + "}");
}
}
EOSTerms container;
auto build_power = [&](auto term, auto & container) {
std::size_t N = term["n"].size();
PowerEOSTerm eos;
auto eigorzero = [&term, &N](const std::string& name) -> Eigen::ArrayXd {
if (!term[name].empty()) {
return toeig(term[name]);
}
else {
return Eigen::ArrayXd::Zero(N);
}
        };
eos.n = eigorzero("n");
eos.t = eigorzero("t");
eos.d = eigorzero("d");
Eigen::ArrayXd c(N), l(N); c.setZero();
int Nlzero = 0, Nlnonzero = 0;
        bool contiguous_lzero = false;
if (term["l"].empty()) {
// exponential part not included
l.setZero();
}
else {
l = toeig(term["l"]);
// l is included, use it to build c; c_i = 1 if l_i > 0, zero otherwise
for (auto i = 0; i < c.size(); ++i) {
if (l[i] > 0) {
c[i] = 1.0;
}
}
// See how many of the first entries have zero values for l_i
contiguous_lzero = (l[0] == 0);
for (auto i = 0; i < c.size(); ++i) {
if (l[i] == 0) {
Nlzero++;
}
}
}
Nlnonzero = l.size() - Nlzero;
eos.c = c;
eos.l = l;
eos.l_i = eos.l.cast<int>();
if (Nlzero + Nlnonzero != l.size()) {
throw std::invalid_argument("Somehow the l lengths don't add up");
}
if (((eos.l_i.cast<double>() - eos.l).cwiseAbs() > 0.0).any()) {
throw std::invalid_argument("Non-integer entry in l found");
}
// If a contiguous portion of the terms have values of l_i that are zero
// it is computationally advantageous to break up the evaluation into
// part that has just the n_i*tau^t_i*delta^d_i and the part with the
// exponential term exp(-delta^l_i)
if (l.sum() == 0) {
// No l term at all, just polynomial
JustPowerEOSTerm poly;
poly.n = eos.n;
poly.t = eos.t;
poly.d = eos.d;
container.add_term(poly);
}
else if (l.sum() > 0 && contiguous_lzero) {
JustPowerEOSTerm poly;
poly.n = eos.n.head(Nlzero);
poly.t = eos.t.head(Nlzero);
poly.d = eos.d.head(Nlzero);
container.add_term(poly);
PowerEOSTerm e;
e.n = eos.n.tail(Nlnonzero);
e.t = eos.t.tail(Nlnonzero);
e.d = eos.d.tail(Nlnonzero);
e.c = eos.c.tail(Nlnonzero);
e.l = eos.l.tail(Nlnonzero);
e.l_i = e.l.cast<int>();
container.add_term(e);
}
else {
// Don't try to get too clever, just add the term
container.add_term(eos);
}
};
auto build_Lemmon2005 = [&](auto term) {
Lemmon2005EOSTerm eos;
eos.n = toeig(term["n"]);
eos.t = toeig(term["t"]);
eos.d = toeig(term["d"]);
eos.m = toeig(term["m"]);
eos.l = toeig(term["l"]);
if (!all_same_length(term, { "n","t","d","m","l" })) {
throw std::invalid_argument("Lengths are not all identical in Lemmon2005 term");
}
if (((eos.l_i.cast<double>() - eos.l).cwiseAbs() > 0.0).any()) {
throw std::invalid_argument("Non-integer entry in l found");
}
return eos;
};
auto build_gaussian = [&](auto term) {
GaussianEOSTerm eos;
eos.n = toeig(term["n"]);
eos.t = toeig(term["t"]);
eos.d = toeig(term["d"]);
eos.eta = toeig(term["eta"]);
eos.beta = toeig(term["beta"]);
eos.gamma = toeig(term["gamma"]);
eos.epsilon = toeig(term["epsilon"]);
if (!all_same_length(term, { "n","t","d","eta","beta","gamma","epsilon" })) {
throw std::invalid_argument("Lengths are not all identical in Gaussian term");
}
return eos;
};
auto build_exponential = [&](auto term) {
ExponentialEOSTerm eos;
eos.n = toeig(term["n"]);
eos.t = toeig(term["t"]);
eos.d = toeig(term["d"]);
eos.g = toeig(term["g"]);
eos.l = toeig(term["l"]);
eos.l_i = eos.l.cast<int>();
if (!all_same_length(term, { "n","t","d","g","l" })) {
throw std::invalid_argument("Lengths are not all identical in exponential term");
}
return eos;
};
auto build_GaoB = [&](auto term) {
GaoBEOSTerm eos;
eos.n = toeig(term["n"]);
eos.t = toeig(term["t"]);
eos.d = toeig(term["d"]);
eos.eta = -toeig(term["eta"]); // Watch out for this sign flip!!
eos.beta = toeig(term["beta"]);
eos.gamma = toeig(term["gamma"]);
eos.epsilon = toeig(term["epsilon"]);
eos.b = toeig(term["b"]);
if (!all_same_length(term, { "n","t","d","eta","beta","gamma","epsilon","b" })) {
throw std::invalid_argument("Lengths are not all identical in GaoB term");
        }
        return eos;
    };
/// lambda function for adding non-analytic terms
auto build_na = [&](auto& term) {
NonAnalyticEOSTerm eos;
eos.n = toeig(term["n"]);
eos.A = toeig(term["A"]);
eos.B = toeig(term["B"]);
eos.C = toeig(term["C"]);
eos.D = toeig(term["D"]);
eos.a = toeig(term["a"]);
eos.b = toeig(term["b"]);
eos.beta = toeig(term["beta"]);
if (!all_same_length(term, { "n","A","B","C","D","a","b","beta" })) {
throw std::invalid_argument("Lengths are not all identical in nonanalytic term");
}
return eos;
};
for (auto& term : alphar) {
if (type == "ResidualHelmholtzPower") {
Ian Bell
committed
build_power(term, container);
}
else if (type == "ResidualHelmholtzGaussian") {
container.add_term(build_gaussian(term));
}
else if (type == "ResidualHelmholtzNonAnalytic") {
container.add_term(build_na(term));
}
else if (type == "ResidualHelmholtzLemmon2005") {
container.add_term(build_Lemmon2005(term));
}
else if (type == "ResidualHelmholtzGaoB") {
container.add_term(build_GaoB(term));
}
else if (type == "ResidualHelmholtzExponential") {
container.add_term(build_exponential(term));
}
else {
throw std::invalid_argument("Bad term type: "+type);
return container;
Ian Bell
committed
}
inline auto get_EOSs(const std::vector<nlohmann::json>& pureJSON) {
std::vector<EOSTerms> EOSs;
for (auto& j : pureJSON) {
auto term = get_EOS_terms(j);
EOSs.emplace_back(term);
}
return EOSs;
}
inline auto collect_component_json(const std::vector<std::string>& components, const std::string& root)
{
std::vector<nlohmann::json> out;
for (auto c : components) {
std::vector<std::filesystem::path> candidates = { c, root + "/dev/fluids/" + c + ".json" };
std::filesystem::path selected_path = "";
for (auto candidate : candidates) {
if (std::filesystem::exists(candidate)) {
selected_path = candidate;
break;
}
}
if (selected_path != "") {
std::ifstream ifs(selected_path);
auto j = nlohmann::json::parse(ifs);
out.push_back(j);
}
else {
throw std::invalid_argument("Could not load the fluid:" + c);
}
}
return out;
}
inline auto collect_identifiers(const std::vector<nlohmann::json>& pureJSON)
{
std::vector<std::string> identifiers;
for (auto j : pureJSON) {
std::string CAS = j.at("INFO").at("CAS");
std::string name = j.at("INFO").at("NAME");
identifiers.push_back(name);
}
return identifiers;
}
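/// Load all the pure-fluid and mixture data and assemble the complete multifluid model
/// (reducing function + corresponding-states contribution + departure contribution)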
inline auto build_multifluid_model(const std::vector<std::string>& components, const std::string& coolprop_root, const std::string& BIPcollectionpath, const nlohmann::json& flags = {}) {
auto stream = std::ifstream(BIPcollectionpath);
if (!stream) {
throw std::invalid_argument("Cannot open BIP collection file: " + BIPcollectionpath);
}
const auto BIPcollection = nlohmann::json::parse(stream);
auto pureJSON = collect_component_json(components, coolprop_root);
auto [Tc, vc] = MultiFluidReducingFunction::get_Tcvc(pureJSON);
auto EOSs = get_EOSs(pureJSON);
// Extract the identifiers to be used
auto identifiers = collect_identifiers(pureJSON);
// Things related to the mixture
auto F = MultiFluidReducingFunction::get_F_matrix(BIPcollection, identifiers, flags);
auto funcs = get_departure_function_matrix(coolprop_root, BIPcollection, identifiers, flags);
auto [betaT, gammaT, betaV, gammaV] = MultiFluidReducingFunction::get_BIP_matrices(BIPcollection, identifiers, flags);
auto redfunc = MultiFluidReducingFunction(betaT, gammaT, betaV, gammaV, Tc, vc);
return MultiFluid(
std::move(redfunc),
std::move(CorrespondingStatesContribution(std::move(EOSs))),
std::move(DepartureContribution(std::move(F), std::move(funcs)))
);
}
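/*
A minimal usage sketch (the paths below are placeholders for a local checkout of the CoolProp
sources; adjust them to your installation):

    auto model = build_multifluid_model(
        {"Methane", "Ethane"},                                   // component names
        "/path/to/CoolProp",                                     // root containing dev/fluids/*.json
        "/path/to/CoolProp/dev/mixtures/mixture_binary_pairs.json");
    Eigen::ArrayXd rhovec(2); rhovec << 3000.0, 1000.0;          // molar densities (mol/m^3)
    auto ar = model.alphar(300.0, rhovec);                       // dimensionless residual Helmholtz energy
*/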
/**
This class holds a lightweight reference to the core parts of the model
The reducing and departure functions are moved into this class, while the donor class is used for the corresponding states portion
*/
template<typename ReducingFunction, typename DepartureFunction, typename BaseClass>
class MultiFluidAdapter {
private:
std::string meta = "";
public:
const BaseClass& base;
const ReducingFunction redfunc;
const DepartureFunction depfunc;
template<class VecType>
auto R(const VecType& molefrac) const { return base.R(molefrac); }
MultiFluidAdapter(const BaseClass& base, ReducingFunction&& redfunc, DepartureFunction &&depfunc) : base(base), redfunc(redfunc), depfunc(depfunc) {};
/// Store some sort of metadata in string form (perhaps a JSON representation of the model?)
void set_meta(const std::string& m) { meta = m; }
/// Get the metadata stored in string form
auto get_meta() const { return meta; }
template<typename TType, typename RhoType, typename MoleFracType>
auto alphar(const TType& T,
const RhoType& rho,
const MoleFracType& molefrac) const
{
auto Tred = forceeval(redfunc.get_Tr(molefrac));
auto rhored = forceeval(redfunc.get_rhor(molefrac));
auto delta = forceeval(rho / rhored);
auto tau = forceeval(Tred / T);
auto val = base.corr.alphar(tau, delta, molefrac) + depfunc.alphar(tau, delta, molefrac);
return forceeval(val);
}
};
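/// Build a "mutant" of an existing multifluid model: the pure-fluid EOS are reused, while the
/// binary interaction parameters and departure functions are overridden by the JSON argument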
template<class Model>
auto build_multifluid_mutant(Model& model, const nlohmann::json& jj) {
auto red = model.redfunc;
auto N = red.Tc.size();
auto betaT = red.betaT, gammaT = red.gammaT, betaV = red.betaV, gammaV = red.gammaV;
auto Tc = red.Tc, vc = red.vc;
// Allocate the matrices of default models and F factors
Eigen::MatrixXd F(N, N); F.setZero();
std::vector<std::vector<DepartureTerms>> funcs(N);
for (auto i = 0; i < N; ++i) { funcs[i].resize(N); }
for (auto i = 0; i < N; ++i) {
for (auto j = i; j < N; ++j) {
if (i == j) {
funcs[i][i].add_term(NullEOSTerm());
}
else {
// Extract the given entry
auto entry = jj[std::to_string(i)][std::to_string(j)];
auto BIP = entry["BIP"];
// Set the reducing function parameters in the copy
                betaT(i, j) = BIP.at("betaT"); betaT(j, i) = 1 / betaT(i, j);
                betaV(i, j) = BIP.at("betaV"); betaV(j, i) = 1 / betaV(i, j);
gammaT(i, j) = BIP.at("gammaT"); gammaT(j, i) = gammaT(i, j);
gammaV(i, j) = BIP.at("gammaV"); gammaV(j, i) = gammaV(i, j);
// Build the matrix of F and departure functions
auto dep = entry["departure"];
                F(i, j) = BIP.at("Fij"); F(j, i) = F(i, j);
funcs[i][j] = build_departure_function(dep);
funcs[j][i] = build_departure_function(dep);
}
}
}
auto newred = MultiFluidReducingFunction(betaT, gammaT, betaV, gammaV, Tc, vc);
auto newdep = DepartureContribution(std::move(F), std::move(funcs));
auto mfa = MultiFluidAdapter(model, std::move(newred), std::move(newdep));
/// Store the departure term in the adapted multifluid class
mfa.set_meta(jj.dump());
return mfa;
}
template<typename Model>
auto build_multifluid_mutant_invariant(Model& model, const nlohmann::json& jj) {
auto red = model.redfunc;
auto N = red.Tc.size();
if (N != 2) {
throw std::invalid_argument("Only binary mixtures are currently supported with invariant departure functions");
}
auto phiT = red.betaT, lambdaT = red.gammaT, phiV = red.betaV, lambdaV = red.gammaV;
phiT.setOnes(); phiV.setOnes();
lambdaT.setZero(); lambdaV.setZero();
auto Tc = red.Tc, vc = red.vc;
// Allocate the matrices of default models and F factors
Eigen::MatrixXd F(N, N); F.setZero();
std::vector<std::vector<DepartureTerms>> funcs(N);
for (auto i = 0; i < N; ++i) { funcs[i].resize(N); }
for (auto i = 0; i < N; ++i) {
for (auto j = i; j < N; ++j) {
if (i == j) {
funcs[i][i].add_term(NullEOSTerm());
}
else {
// Extract the given entry
auto entry = jj[std::to_string(i)][std::to_string(j)];
auto BIP = entry["BIP"];
// Set the reducing function parameters in the copy
                phiT(i, j) = BIP.at("phiT"); phiT(j, i) = phiT(i, j);
                lambdaT(i, j) = BIP.at("lambdaT"); lambdaT(j, i) = -lambdaT(i, j);
                phiV(i, j) = BIP.at("phiV"); phiV(j, i) = phiV(i, j);
                lambdaV(i, j) = BIP.at("lambdaV"); lambdaV(j, i) = -lambdaV(i, j);
// Build the matrix of F and departure functions
auto dep = entry["departure"];
                F(i, j) = BIP.at("Fij"); F(j, i) = F(i, j);
funcs[i][j] = build_departure_function(dep);
funcs[j][i] = build_departure_function(dep);