script optimization for record operations sourced (in part) from other records

Vern Paxson 2024-05-10 20:22:04 -07:00 committed by Arne Welzel
parent eb5ea66012
commit 4b719ef45a
21 changed files with 953 additions and 50 deletions
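In brief: the commit teaches the script optimizer to recognize runs of record field assignments and additions that all draw on a single source record, as well as record constructors whose initializers mostly come from one record, and to turn them into bulk operations (the new EXPR_REC_ASSIGN_FIELDS, EXPR_REC_ADD_FIELDS and EXPR_REC_CONSTRUCT_WITH_REC expressions below). A minimal Zeek sketch of the targeted patterns follows; the type and variable names are illustrative only, not taken from the commit:

type Src: record { a: count; b: double; };
type Dst: record { x: count; y: double; z: double; };

global s = Src($a = 5, $b = 2.5);
# Constructor sourced (in part) from another record.
global d = Dst($x = s$a, $y = s$b, $z = 0.0);

# Chain of field assignments from one source record.
d$x = s$a;
d$y = s$b;
# Chain of field additions from one source record.
d$z += s$b;
d$y += s$b;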

View file

@ -99,11 +99,14 @@ const char* expr_name(ExprTag t) {
"vec+=",
"[]=",
"$=",
"from_any_vec_coerce ",
"$=$",
"$+=$",
"[=+$]",
"from_any_vec_coerce",
"any[]",
"ZAM-builtin()",
"nop",
"nop", // don't add after this, it's used to compute NUM_EXPRS
};
if ( int(t) >= NUM_EXPRS ) {
@ -2916,7 +2919,8 @@ RecordConstructorExpr::RecordConstructorExpr(ListExprPtr constructor_list)
Error("bad type in record constructor", constructor_error_expr);
}
RecordConstructorExpr::RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr constructor_list)
RecordConstructorExpr::RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr constructor_list,
bool check_mandatory_fields)
: Expr(EXPR_RECORD_CONSTRUCTOR), op(std::move(constructor_list)) {
if ( IsError() )
return;
@ -2957,6 +2961,9 @@ RecordConstructorExpr::RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr
if ( IsError() )
return;
if ( ! check_mandatory_fields )
return;
auto n = known_rt->NumFields();
for ( i = 0; i < n; ++i )
if ( fields_seen.count(i) == 0 ) {

View file

@ -103,6 +103,9 @@ enum ExprTag : int {
EXPR_APPEND_TO,
EXPR_INDEX_ASSIGN,
EXPR_FIELD_LHS_ASSIGN,
EXPR_REC_ASSIGN_FIELDS,
EXPR_REC_ADD_FIELDS,
EXPR_REC_CONSTRUCT_WITH_REC,
EXPR_FROM_ANY_VEC_COERCE,
EXPR_ANY_INDEX,
EXPR_SCRIPT_OPT_BUILTIN,
@ -1166,7 +1169,10 @@ public:
explicit RecordConstructorExpr(ListExprPtr constructor_list);
// This form is used to construct records of a known (ultimate) type.
explicit RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr constructor_list);
// The flag allows skipping of checking for mandatory fields, for
// script optimization that may elide them.
explicit RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr constructor_list,
bool check_mandatory_fields = true);
ListExprPtr Op() const { return op; }
const auto& Map() const { return map; }

View file

@ -133,6 +133,7 @@ TraversalCode CSE_ValidityChecker::PreExpr(const Expr* e) {
case EXPR_RECORD_COERCE:
case EXPR_RECORD_CONSTRUCTOR:
case EXPR_REC_CONSTRUCT_WITH_REC:
// Note, record coercion behaves like constructors in terms of
// potentially executing &default functions. In either case,
// the type of the expression reflects the type we want to analyze

View file

@ -1783,7 +1783,7 @@ ExprPtr RecordConstructorExpr::Duplicate() {
if ( map ) {
auto rt = cast_intrusive<RecordType>(type);
return SetSucc(new RecordConstructorExpr(rt, op_l));
return SetSucc(new RecordConstructorExpr(rt, op_l, false));
}
else
return SetSucc(new RecordConstructorExpr(op_l));
@ -1807,6 +1807,11 @@ bool RecordConstructorExpr::HasReducedOps(Reducer* c) const {
}
ExprPtr RecordConstructorExpr::Reduce(Reducer* c, StmtPtr& red_stmt) {
if ( ConstructFromRecordExpr::FindMostCommonRecordSource(op) ) {
auto cfr = with_location_of(make_intrusive<ConstructFromRecordExpr>(this), this);
return cfr->Reduce(c, red_stmt);
}
red_stmt = ReduceToSingletons(c);
if ( c->Optimizing() )
@ -2846,6 +2851,249 @@ void FieldLHSAssignExpr::ExprDescribe(ODesc* d) const {
op2->Describe(d);
}
// Helper functions.
// This first one mines, from a given statement in an assignment chain, the
// variable that occurs as the LHS target, so 'x' for "x$foo = y$bar".
static NameExprPtr get_RFU_LHS_var(const Stmt* s) {
auto s_e = s->AsExprStmt()->StmtExpr();
auto var = s_e->GetOp1()->GetOp1()->GetOp1();
ASSERT(var->Tag() == EXPR_NAME);
return cast_intrusive<NameExpr>(var);
}
// This one mines out the RHS, so 'y' for "x$foo = y$bar", or for
// "x$foo = x$foo + y$bar" (which is what "x$foo += y$bar" is at this point).
static NameExprPtr get_RFU_RHS_var(const Stmt* s) {
auto s_e = s->AsExprStmt()->StmtExpr();
auto rhs = s_e->GetOp2();
ExprPtr var;
if ( rhs->Tag() == EXPR_FIELD )
var = rhs->GetOp1();
else
var = rhs->GetOp2()->GetOp1();
ASSERT(var->Tag() == EXPR_NAME);
return cast_intrusive<NameExpr>(var);
}
RecordFieldUpdatesExpr::RecordFieldUpdatesExpr(ExprTag t, const std::vector<const Stmt*>& stmts,
std::set<const Stmt*>& stmt_pool)
: BinaryExpr(t, get_RFU_LHS_var(stmts[0]), get_RFU_RHS_var(stmts[0])) {
// Build up the LHS map (record fields we're assigning/adding) and RHS map
// (record fields from which we're assigning).
for ( auto s : stmts ) {
auto s_e = s->AsExprStmt()->StmtExpr();
auto lhs = s_e->GetOp1()->GetOp1();
auto lhs_field = lhs->AsFieldExpr()->Field();
auto rhs = s_e->GetOp2();
if ( rhs->Tag() != EXPR_FIELD )
// It's "x$foo = x$foo + y$bar".
rhs = rhs->GetOp2();
auto rhs_field = rhs->AsFieldExpr()->Field();
lhs_map.push_back(lhs_field);
rhs_map.push_back(rhs_field);
// Consistency check that the statement is indeed in the pool,
// before we remove it.
ASSERT(stmt_pool.count(s) > 0);
stmt_pool.erase(s);
}
}
RecordFieldUpdatesExpr::RecordFieldUpdatesExpr(ExprTag t, ExprPtr e1, ExprPtr e2, std::vector<int> _lhs_map,
std::vector<int> _rhs_map)
: BinaryExpr(t, std::move(e1), std::move(e2)) {
lhs_map = std::move(_lhs_map);
rhs_map = std::move(_rhs_map);
}
ValPtr RecordFieldUpdatesExpr::Fold(Val* v1, Val* v2) const {
auto rv1 = v1->AsRecordVal();
auto rv2 = v2->AsRecordVal();
for ( size_t i = 0; i < lhs_map.size(); ++i )
FoldField(rv1, rv2, i);
return nullptr;
}
bool RecordFieldUpdatesExpr::IsReduced(Reducer* c) const { return HasReducedOps(c); }
void RecordFieldUpdatesExpr::ExprDescribe(ODesc* d) const {
op1->Describe(d);
d->Add(expr_name(tag));
op2->Describe(d);
}
ExprPtr RecordFieldUpdatesExpr::Reduce(Reducer* c, StmtPtr& red_stmt) {
if ( c->Optimizing() ) {
op1 = c->UpdateExpr(op1);
op2 = c->UpdateExpr(op2);
}
red_stmt = nullptr;
if ( ! op1->IsSingleton(c) )
op1 = op1->ReduceToSingleton(c, red_stmt);
StmtPtr red2_stmt;
if ( ! op2->IsSingleton(c) )
op2 = op2->ReduceToSingleton(c, red2_stmt);
red_stmt = MergeStmts(red_stmt, std::move(red2_stmt));
return ThisPtr();
}
ExprPtr AssignRecordFieldsExpr::Duplicate() {
auto e1 = op1->Duplicate();
auto e2 = op2->Duplicate();
return SetSucc(new AssignRecordFieldsExpr(e1, e2, lhs_map, rhs_map));
}
void AssignRecordFieldsExpr::FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const {
rv1->Assign(lhs_map[i], rv2->GetField(rhs_map[i]));
}
ConstructFromRecordExpr::ConstructFromRecordExpr(const RecordConstructorExpr* orig)
: AssignRecordFieldsExpr(nullptr, nullptr, {}, {}) {
tag = EXPR_REC_CONSTRUCT_WITH_REC;
SetType(orig->GetType());
// Arguments used in original and final constructor.
auto& orig_args = orig->Op()->Exprs();
// The one we'll build up below:
auto args = with_location_of(make_intrusive<ListExpr>(), orig);
auto src_id = FindMostCommonRecordSource(orig->Op());
auto& map = orig->Map();
for ( size_t i = 0; i < orig_args.size(); ++i ) {
auto e = orig_args[i];
auto src = FindRecordSource(e);
if ( src && src->GetOp1()->AsNameExpr()->IdPtr() == src_id ) {
// "map" might be nil if we're optimize [$x = foo$bar].
lhs_map.push_back(map ? (*map)[i] : i);
rhs_map.push_back(src->Field());
}
else
args->Append({NewRef{}, e});
}
auto rt = cast_intrusive<RecordType>(orig->GetType());
op1 = with_location_of(make_intrusive<RecordConstructorExpr>(std::move(rt), std::move(args), false), orig);
op2 = with_location_of(make_intrusive<NameExpr>(std::move(src_id)), orig);
}
IDPtr ConstructFromRecordExpr::FindMostCommonRecordSource(const ListExprPtr& exprs) {
// Maps identifiers to how often they appear in the constructor's
// arguments as a field reference. Used to find the most common.
std::unordered_map<IDPtr, int> id_cnt;
for ( auto e : exprs->Exprs() ) {
auto src = FindRecordSource(e);
if ( src ) {
auto id = src->GetOp1()->AsNameExpr()->IdPtr();
++id_cnt[id];
#if 0
auto ic = id_cnt.find(id);
if ( ic == id_cnt.end() )
id_cnt[id] = 1;
else
++ic->second;
#endif
}
}
if ( id_cnt.empty() )
return nullptr;
// Return the most common.
auto max_entry = std::max_element(id_cnt.begin(), id_cnt.end(),
[](const std::pair<IDPtr, int>& p1, const std::pair<IDPtr, int>& p2) {
return p1.second < p2.second;
});
return max_entry->first;
}
FieldExprPtr ConstructFromRecordExpr::FindRecordSource(const Expr* const_e) {
// The following cast just saves us from having to define a "const" version
// of AsFieldAssignExprPtr().
auto e = const_cast<Expr*>(const_e);
const auto fa = e->AsFieldAssignExprPtr();
auto fa_rhs = e->GetOp1();
if ( fa_rhs->Tag() != EXPR_FIELD )
return nullptr;
auto rhs_rec = fa_rhs->GetOp1();
if ( rhs_rec->Tag() != EXPR_NAME )
return nullptr;
return cast_intrusive<FieldExpr>(fa_rhs);
}
ExprPtr ConstructFromRecordExpr::Duplicate() {
auto e1 = op1->Duplicate();
auto e2 = op2->Duplicate();
return SetSucc(new ConstructFromRecordExpr(e1, e2, lhs_map, rhs_map));
}
bool ConstructFromRecordExpr::IsReduced(Reducer* c) const { return op1->HasReducedOps(c) && op2->IsReduced(c); }
bool ConstructFromRecordExpr::HasReducedOps(Reducer* c) const { return IsReduced(c); }
ExprPtr ConstructFromRecordExpr::Reduce(Reducer* c, StmtPtr& red_stmt) {
if ( c->Optimizing() ) {
op1 = c->UpdateExpr(op1);
op2 = c->UpdateExpr(op2);
}
red_stmt = nullptr;
if ( ! op1->HasReducedOps(c) )
red_stmt = op1->ReduceToSingletons(c);
StmtPtr red2_stmt;
if ( ! op2->IsSingleton(c) )
op2 = op2->ReduceToSingleton(c, red2_stmt);
red_stmt = MergeStmts(red_stmt, std::move(red2_stmt));
if ( c->Optimizing() )
return ThisPtr();
else
return AssignToTemporary(c, red_stmt);
}
ExprPtr AddRecordFieldsExpr::Duplicate() {
auto e1 = op1->Duplicate();
auto e2 = op2->Duplicate();
return SetSucc(new AddRecordFieldsExpr(e1, e2, lhs_map, rhs_map));
}
void AddRecordFieldsExpr::FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const {
// The goal here is correctness, not efficiency, since normally this
// expression only exists temporarily before being compiled to ZAM.
// Doing it this way saves us from having to switch on the type of the '+'
// operands.
auto lhs_val = rv1->GetField(lhs_map[i]);
auto rhs_val = rv2->GetField(rhs_map[i]);
auto lhs_const = make_intrusive<ConstExpr>(lhs_val);
auto rhs_const = make_intrusive<ConstExpr>(rhs_val);
auto add_expr = make_intrusive<AddExpr>(lhs_const, rhs_const);
auto sum = add_expr->Eval(nullptr);
ASSERT(sum);
rv1->Assign(lhs_map[i], sum);
}
CoerceToAnyExpr::CoerceToAnyExpr(ExprPtr arg_op) : UnaryExpr(EXPR_TO_ANY_COERCE, std::move(arg_op)) {
type = base_type(TYPE_ANY);
}

View file

@ -104,6 +104,98 @@ protected:
int field;
};
// Base class for updating a number of record fields from fields in
// another record.
class RecordFieldUpdatesExpr : public BinaryExpr {
public:
const auto& LHSMap() const { return lhs_map; }
const auto& RHSMap() const { return rhs_map; }
// Only needed if we're transforming-but-not-compiling.
ValPtr Fold(Val* v1, Val* v2) const override;
bool IsPure() const override { return false; }
bool IsReduced(Reducer* c) const override;
ExprPtr Reduce(Reducer* c, StmtPtr& red_stmt) override;
protected:
RecordFieldUpdatesExpr(ExprTag t, const std::vector<const Stmt*>& stmts, std::set<const Stmt*>& stmt_pool);
RecordFieldUpdatesExpr(ExprTag t, ExprPtr e1, ExprPtr e2, std::vector<int> _lhs_map, std::vector<int> _rhs_map);
// Apply the operation for the given index 'i' from rv2 to rv1.
// Does not return a value since we're modifying rv1 in-place.
virtual void FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const = 0;
void ExprDescribe(ODesc* d) const override;
std::vector<int> lhs_map;
std::vector<int> rhs_map;
};
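For illustration (hypothetical types and field offsets, not from the commit), a reduced chain such as

type Y: record { p: count; q: count; };
type X: record { a: count; b: count; };
global y = Y($p = 1, $q = 2);
global x: X;
x$a = y$p;
x$b = y$q;

collapses into a single RecordFieldUpdatesExpr whose lhs_map holds the offsets of a and b in X and whose rhs_map holds the offsets of p and q in Y (here {0, 1} for both); Fold() then applies FoldField() once per index, and under -O ZAM the whole chain compiles to a single Rec-Assign-Fields style instruction.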
// Assign a bunch of record fields en masse from fields in another record.
class AssignRecordFieldsExpr : public RecordFieldUpdatesExpr {
public:
AssignRecordFieldsExpr(const std::vector<const Stmt*>& stmts, std::set<const Stmt*>& stmt_pool)
: RecordFieldUpdatesExpr(EXPR_REC_ASSIGN_FIELDS, stmts, stmt_pool) {}
ExprPtr Duplicate() override;
protected:
// Used for duplicating.
AssignRecordFieldsExpr(ExprPtr e1, ExprPtr e2, std::vector<int> _lhs_map, std::vector<int> _rhs_map)
: RecordFieldUpdatesExpr(EXPR_REC_ASSIGN_FIELDS, e1, e2, _lhs_map, _rhs_map) {}
void FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const override;
};
// Construct a record with some of the fields taken directly from another
// record. After full construction, the first operand is the base constructor
// (a subset of the original) and the second is the source record being used
// for some of the initialization.
using FieldExprPtr = IntrusivePtr<FieldExpr>;
class ConstructFromRecordExpr : public AssignRecordFieldsExpr {
public:
ConstructFromRecordExpr(const RecordConstructorExpr* orig);
// Helper function that finds the most common source value.
// Returns its identifier, or nil if there is no "$field = x$y"
// to leverage.
static IDPtr FindMostCommonRecordSource(const ListExprPtr& exprs);
ExprPtr Duplicate() override;
bool IsReduced(Reducer* c) const override;
bool HasReducedOps(Reducer* c) const override;
ExprPtr Reduce(Reducer* c, StmtPtr& red_stmt) override;
protected:
ConstructFromRecordExpr(ExprPtr e1, ExprPtr e2, std::vector<int> _lhs_map, std::vector<int> _rhs_map)
: AssignRecordFieldsExpr(e1, e2, _lhs_map, _rhs_map) {
tag = EXPR_REC_CONSTRUCT_WITH_REC;
}
// Helper function that for a given "$field = x$y" returns the
// "x$y" node, or nil if that's not the nature of the expression.
static FieldExprPtr FindRecordSource(const Expr* e);
};
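As a hedged sketch of what this rewrite targets (names are illustrative, not from the commit): in

type Src: record { a: count; b: double; };
type Dst: record { x: count; y: double; msg: string; };
global s = Src($a = 1, $b = 2.0);
global d = Dst($x = s$a, $y = s$b, $msg = "hi");

the most common source record is s, so the $x and $y initializers move into the lhs_map/rhs_map pair, the residual constructor covering $msg becomes op1, and a NameExpr for s becomes op2.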
// Add en masse fields from one record to fields in another record.
// We could add additional such expressions for other common operations
// like "x$foo -= y$bar", but in practice these are quite rare.
class AddRecordFieldsExpr : public RecordFieldUpdatesExpr {
public:
AddRecordFieldsExpr(const std::vector<const Stmt*>& stmts, std::set<const Stmt*>& stmt_pool)
: RecordFieldUpdatesExpr(EXPR_REC_ADD_FIELDS, stmts, stmt_pool) {}
ExprPtr Duplicate() override;
protected:
AddRecordFieldsExpr(ExprPtr e1, ExprPtr e2, std::vector<int> _lhs_map, std::vector<int> _rhs_map)
: RecordFieldUpdatesExpr(EXPR_REC_ADD_FIELDS, e1, e2, _lhs_map, _rhs_map) {}
void FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const override;
};
// ... and for conversion from a "vector of any" type.
class CoerceFromAnyVecExpr : public UnaryExpr {
public:

View file

@ -442,7 +442,8 @@ TraversalCode ProfileFunc::PreExpr(const Expr* e) {
return TC_ABORTSTMT;
}
case EXPR_RECORD_CONSTRUCTOR: CheckRecordConstructor(e->GetType()); break;
case EXPR_RECORD_CONSTRUCTOR:
case EXPR_REC_CONSTRUCT_WITH_REC: CheckRecordConstructor(e->GetType()); break;
case EXPR_SET_CONSTRUCTOR: {
auto sc = static_cast<const SetConstructorExpr*>(e);

View file

@ -57,7 +57,7 @@ static bool same_op(const Expr* op1, const Expr* op2, bool check_defs) {
return def_1 == def_2 && def_1 != NO_DEF;
}
else if ( op1->Tag() == EXPR_CONST ) {
if ( op1->Tag() == EXPR_CONST ) {
auto op1_c = op1->AsConstExpr();
auto op2_c = op2->AsConstExpr();
@ -67,7 +67,7 @@ static bool same_op(const Expr* op1, const Expr* op2, bool check_defs) {
return same_val(op1_v, op2_v);
}
else if ( op1->Tag() == EXPR_LIST ) {
if ( op1->Tag() == EXPR_LIST ) {
auto op1_l = op1->AsListExpr()->Exprs();
auto op2_l = op2->AsListExpr()->Exprs();
@ -81,8 +81,22 @@ static bool same_op(const Expr* op1, const Expr* op2, bool check_defs) {
return true;
}
reporter->InternalError("bad singleton tag");
return false;
// We only get here if dealing with non-reduced operands.
auto subop1_1 = op1->GetOp1();
auto subop1_2 = op2->GetOp1();
ASSERT(subop1_1 && subop1_2);
if ( ! same_expr(subop1_1, subop1_2) )
return false;
auto subop2_1 = op1->GetOp2();
auto subop2_2 = op2->GetOp2();
if ( subop2_1 && ! same_expr(subop2_1, subop2_2) )
return false;
auto subop3_1 = op1->GetOp3();
auto subop3_2 = op2->GetOp3();
return ! subop3_1 || same_expr(subop3_1, subop3_2);
}
static bool same_op(const ExprPtr& op1, const ExprPtr& op2, bool check_defs) {
@ -107,6 +121,7 @@ static bool same_expr(const Expr* e1, const Expr* e2, bool check_defs) {
case EXPR_CLONE:
case EXPR_RECORD_CONSTRUCTOR:
case EXPR_REC_CONSTRUCT_WITH_REC:
case EXPR_TABLE_CONSTRUCTOR:
case EXPR_SET_CONSTRUCTOR:
case EXPR_VECTOR_CONSTRUCTOR:
@ -473,7 +488,8 @@ bool Reducer::ExprValid(const ID* id, const Expr* e1, const Expr* e2) const {
has_side_effects = true;
}
else if ( e1->Tag() == EXPR_RECORD_CONSTRUCTOR || e1->Tag() == EXPR_RECORD_COERCE )
else if ( e1->Tag() == EXPR_RECORD_CONSTRUCTOR || e1->Tag() == EXPR_REC_CONSTRUCT_WITH_REC ||
e1->Tag() == EXPR_RECORD_COERCE )
has_side_effects = pfs->HasSideEffects(SideEffectsOp::CONSTRUCTION, e1->GetType());
e1_se = ExprSideEffects(has_side_effects);

View file

@ -592,7 +592,7 @@ void analyze_scripts(bool no_unused_warnings) {
func.SetShouldNotAnalyze();
if ( ! have_one_to_do )
reporter->FatalError("no matching functions/files for C++ compilation");
reporter->FatalError("no matching functions/files for script optimization");
if ( CPP_init_hook ) {
(*CPP_init_hook)();

View file

@ -143,8 +143,13 @@ StmtPtr ExprStmt::DoReduce(Reducer* c) {
// it has a non-void type it'll generate an
// assignment to a temporary.
red_e_stmt = e->ReduceToSingletons(c);
else
else {
e = e->Reduce(c, red_e_stmt);
// It's possible that 'e' has gone away because it was a call
// to an inlined function that doesn't have a return value.
if ( ! e )
return red_e_stmt;
}
if ( red_e_stmt ) {
auto s = make_intrusive<StmtList>(red_e_stmt, ThisPtr());
@ -735,11 +740,210 @@ StmtPtr StmtList::DoReduce(Reducer* c) {
return ThisPtr();
}
static unsigned int find_rec_assignment_chain(const std::vector<StmtPtr>& stmts, unsigned int i) {
const NameExpr* targ_rec = nullptr;
std::set<int> fields_seen;
for ( ; i < stmts.size(); ++i ) {
const auto& s = stmts[i];
// We're looking for either "x$a = y$b" or "x$a = x$a + y$b".
if ( s->Tag() != STMT_EXPR )
// No way it's an assignment.
return i;
auto se = s->AsExprStmt()->StmtExpr();
if ( se->Tag() != EXPR_ASSIGN )
return i;
// The LHS of an assignment starts with a RefExpr.
auto lhs_ref = se->GetOp1();
ASSERT(lhs_ref->Tag() == EXPR_REF);
auto lhs = lhs_ref->GetOp1();
if ( lhs->Tag() != EXPR_FIELD )
// Not of the form "x$a = ...".
return i;
auto lhs_field = lhs->AsFieldExpr()->Field();
if ( fields_seen.count(lhs_field) > 0 )
// Earlier in this chain we've already seen "x$a", so end the
// chain at this repeated use because it's no longer a simple
// block of field assignments.
return i;
fields_seen.insert(lhs_field);
auto lhs_rec = lhs->GetOp1();
if ( lhs_rec->Tag() != EXPR_NAME )
// Not a simple field reference, e.g. "x$y$a".
return i;
auto lhs_rec_n = lhs_rec->AsNameExpr();
if ( targ_rec ) {
if ( lhs_rec_n->Id() != targ_rec->Id() )
// It's no longer "x$..." but some new variable "z$...".
return i;
}
else
targ_rec = lhs_rec_n;
}
return i;
}
using OpChain = std::map<const ID*, std::vector<const Stmt*>>;
static void update_assignment_chains(const StmtPtr& s, OpChain& assign_chains, OpChain& add_chains) {
auto se = s->AsExprStmt()->StmtExpr();
ASSERT(se->Tag() == EXPR_ASSIGN);
// The first GetOp1() here accesses the EXPR_ASSIGN's first operand,
// which is a RefExpr; the second gets its operand, which we've guaranteed
// in find_rec_assignment_chain is a FieldExpr.
auto lhs_fe = se->GetOp1()->GetOp1()->AsFieldExpr();
auto lhs_id = lhs_fe->GetOp1()->AsNameExpr()->Id();
auto rhs = se->GetOp2();
const FieldExpr* f;
OpChain* c;
// Check whether RHS is either "y$b" or "x$a + y$b".
if ( rhs->Tag() == EXPR_ADD ) {
auto rhs_op1 = rhs->GetOp1(); // need to see that it's "x$a"
if ( rhs_op1->Tag() != EXPR_FIELD )
return;
auto rhs1_fe = rhs_op1->AsFieldExpr();
auto rhs_op1_rec = rhs1_fe->GetOp1();
if ( rhs_op1_rec->Tag() != EXPR_NAME || rhs_op1_rec->AsNameExpr()->Id() != lhs_id ||
rhs1_fe->Field() != lhs_fe->Field() )
return;
auto rhs_op2 = rhs->GetOp2(); // need to see that it's "y$b"
if ( rhs_op2->Tag() != EXPR_FIELD )
return;
if ( ! IsArithmetic(rhs_op2->GetType()->Tag()) )
// Avoid esoteric forms of adding.
return;
f = rhs_op2->AsFieldExpr();
c = &add_chains;
}
else if ( rhs->Tag() == EXPR_FIELD ) {
f = rhs->AsFieldExpr();
c = &assign_chains;
}
else
// Not a RHS we know how to leverage.
return;
auto f_rec = f->GetOp1();
if ( f_rec->Tag() != EXPR_NAME )
// Not a simple RHS, instead something like "y$z$b".
return;
// If we get here, it's a keeper, record the associated statement.
auto id = f_rec->AsNameExpr()->Id();
(*c)[id].push_back(s.get());
#if 0
auto cf = c->find(id);
if ( cf == c->end() )
(*c)[id] = std::vector<const Stmt*>{s.get()};
else
cf->second.push_back(s.get());
#endif
}
static StmtPtr transform_chain(const OpChain& c, ExprTag t, std::set<const Stmt*>& chain_stmts) {
IntrusivePtr<StmtList> sl;
for ( auto& id_stmts : c ) {
auto orig_s = id_stmts.second;
if ( ! sl )
// Now that we have a statement, create our list and associate
// its location with the statement.
sl = with_location_of(make_intrusive<StmtList>(), orig_s[0]);
ExprPtr e;
if ( t == EXPR_ASSIGN )
e = make_intrusive<AssignRecordFieldsExpr>(orig_s, chain_stmts);
else if ( t == EXPR_ADD )
e = make_intrusive<AddRecordFieldsExpr>(orig_s, chain_stmts);
else
reporter->InternalError("inconsistency transforming assignment chain");
e->SetLocationInfo(sl->GetLocationInfo());
auto es = with_location_of(make_intrusive<ExprStmt>(std::move(e)), sl);
sl->Stmts().emplace_back(std::move(es));
}
return sl;
}
static bool simplify_chain(const std::vector<StmtPtr>& stmts, unsigned int start, unsigned int end,
std::vector<StmtPtr>& f_stmts) {
OpChain assign_chains;
OpChain add_chains;
std::set<const Stmt*> chain_stmts;
for ( auto i = start; i <= end; ++i ) {
auto& s = stmts[i];
chain_stmts.insert(s.get());
update_assignment_chains(s, assign_chains, add_chains);
}
// An add-chain of any size is a win. For an assign-chain to be a win,
// it needs to have at least two elements, because a single "x$a = y$b"
// can be expressed using one ZAM instruction (but "x$a += y$b" cannot).
if ( add_chains.empty() ) {
bool have_useful_assign_chain = false;
for ( auto& ac : assign_chains )
if ( ac.second.size() > 1 ) {
have_useful_assign_chain = true;
break;
}
if ( ! have_useful_assign_chain )
// No gains available.
return false;
}
auto as_c = transform_chain(assign_chains, EXPR_ASSIGN, chain_stmts);
auto ad_c = transform_chain(add_chains, EXPR_ADD, chain_stmts);
ASSERT(as_c || ad_c);
if ( as_c )
f_stmts.push_back(as_c);
if ( ad_c )
f_stmts.push_back(ad_c);
// At this point, chain_stmts has only the remainders that weren't removed.
for ( auto s : stmts )
if ( chain_stmts.count(s.get()) > 0 )
f_stmts.push_back(s);
return true;
}
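A short Zeek illustration of the win/no-win rule described in simplify_chain above (hypothetical types, not from the commit's test):

type Y: record { p: count; q: count; };
type X: record { a: count; b: count; };
global y = Y($p = 1, $q = 2);
global x = X($a = 0, $b = 0);

# A lone assignment (nothing else nearby sourced from y) is left alone:
# it already compiles to one ZAM instruction.
x$a = y$p;

# Two or more assignments sourced from the same record form a worthwhile assign-chain.
x$a = y$p;
x$b = y$q;

# Even a single "+=" is rewritten, since it cannot be expressed as one ZAM instruction.
x$b += y$q;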
bool StmtList::ReduceStmt(unsigned int& s_i, std::vector<StmtPtr>& f_stmts, Reducer* c) {
bool did_change = false;
auto& stmt_i = stmts[s_i];
auto old_stmt = stmt_i;
auto chain_end = find_rec_assignment_chain(stmts, s_i);
if ( chain_end > s_i && simplify_chain(stmts, s_i, chain_end - 1, f_stmts) ) {
s_i = chain_end - 1;
return true;
}
auto stmt = stmt_i->Reduce(c);
if ( stmt != old_stmt )

View file

@ -464,6 +464,13 @@ UDs UseDefs::ExprUDs(const Expr* e) {
break;
}
case EXPR_REC_CONSTRUCT_WITH_REC: {
auto constructor_UDs = ExprUDs(e->GetOp1().get());
AddInExprUDs(uds, e->GetOp2().get());
uds = UD_Union(uds, constructor_UDs);
break;
}
case EXPR_TABLE_CONSTRUCTOR: {
auto t = static_cast<const TableConstructorExpr*>(e);
AddInExprUDs(uds, t->GetOp1().get());

View file

@ -189,6 +189,7 @@ private:
const ZAMStmt CompileAddToExpr(const AddToExpr* e);
const ZAMStmt CompileRemoveFromExpr(const RemoveFromExpr* e);
const ZAMStmt CompileAssignExpr(const AssignExpr* e);
const ZAMStmt CompileRecFieldUpdates(const RecordFieldUpdatesExpr* e);
const ZAMStmt CompileZAMBuiltin(const NameExpr* lhs, const ScriptOptBuiltinExpr* zbi);
const ZAMStmt CompileAssignToIndex(const NameExpr* lhs, const IndexExpr* rhs);
const ZAMStmt CompileFieldLHSAssignExpr(const FieldLHSAssignExpr* e);
@ -244,7 +245,9 @@ private:
const ZAMStmt ConstructTable(const NameExpr* n, const Expr* e);
const ZAMStmt ConstructSet(const NameExpr* n, const Expr* e);
const ZAMStmt ConstructRecord(const NameExpr* n, const Expr* e);
const ZAMStmt ConstructRecord(const NameExpr* n, const Expr* e) { return ConstructRecord(n, e, false); }
const ZAMStmt ConstructRecordFromRecord(const NameExpr* n, const Expr* e) { return ConstructRecord(n, e, true); }
const ZAMStmt ConstructRecord(const NameExpr* n, const Expr* e, bool is_from_rec);
const ZAMStmt ConstructVector(const NameExpr* n, const Expr* e);
const ZAMStmt ArithCoerce(const NameExpr* n, const Expr* e);

View file

@ -26,6 +26,9 @@ const ZAMStmt ZAMCompiler::CompileExpr(const Expr* e) {
case EXPR_ASSIGN: return CompileAssignExpr(static_cast<const AssignExpr*>(e));
case EXPR_REC_ASSIGN_FIELDS:
case EXPR_REC_ADD_FIELDS: return CompileRecFieldUpdates(static_cast<const RecordFieldUpdatesExpr*>(e));
case EXPR_INDEX_ASSIGN: {
auto iae = static_cast<const IndexAssignExpr*>(e);
auto t = iae->GetOp1()->GetType()->Tag();
@ -281,6 +284,77 @@ const ZAMStmt ZAMCompiler::CompileAssignExpr(const AssignExpr* e) {
#include "ZAM-GenExprsDefsV.h"
}
const ZAMStmt ZAMCompiler::CompileRecFieldUpdates(const RecordFieldUpdatesExpr* e) {
auto rhs = e->GetOp2()->AsNameExpr();
auto& rhs_map = e->RHSMap();
auto aux = new ZInstAux(0);
aux->map = e->LHSMap();
aux->rhs_map = rhs_map;
// Used to track the different types present, so we can see whether
// we can use a homogeneous operator or need a mixed one. Won't be
// needed if we're doing assignments, but handy if we're doing adds.
std::set<TypeTag> field_tags;
size_t num_managed = 0;
for ( auto i : rhs_map ) {
auto rt = rhs->GetType()->AsRecordType();
auto rt_ft_i = rt->GetFieldType(i);
field_tags.insert(rt_ft_i->Tag());
if ( ZVal::IsManagedType(rt_ft_i) ) {
aux->is_managed.push_back(true);
++num_managed;
}
else
// This will only be needed if is_managed winds up being true,
// but it's harmless to build it up in any case.
aux->is_managed.push_back(false);
// The following is only needed for non-homogeneous "add"s, but
// likewise it's harmless to build it anyway.
aux->types.push_back(rt_ft_i);
}
bool homogeneous = field_tags.size() == 1;
// Here we leverage the fact that C++ "+=" works identically for
// signed and unsigned int's.
if ( ! homogeneous && field_tags.size() == 2 && field_tags.count(TYPE_INT) > 0 && field_tags.count(TYPE_COUNT) > 0 )
homogeneous = true;
ZOp op;
if ( e->Tag() == EXPR_REC_ASSIGN_FIELDS ) {
if ( num_managed == rhs_map.size() )
// This operand allows for a simpler implementation.
op = OP_REC_ASSIGN_FIELDS_ALL_MANAGED_VV;
else if ( num_managed > 0 )
op = OP_REC_ASSIGN_FIELDS_MANAGED_VV;
else
op = OP_REC_ASSIGN_FIELDS_VV;
}
else if ( homogeneous ) {
if ( field_tags.count(TYPE_DOUBLE) > 0 )
op = OP_REC_ADD_DOUBLE_FIELDS_VV;
else
// Here we leverage that += will work for both signed/unsigned.
op = OP_REC_ADD_INT_FIELDS_VV;
}
else
op = OP_REC_ADD_FIELDS_VV;
auto lhs = e->GetOp1()->AsNameExpr();
auto z = GenInst(op, lhs, rhs);
z.aux = aux;
return AddInst(z);
}
const ZAMStmt ZAMCompiler::CompileZAMBuiltin(const NameExpr* lhs, const ScriptOptBuiltinExpr* zbi) {
auto op1 = zbi->GetOp1();
auto op2 = zbi->GetOp2();
@ -1253,10 +1327,11 @@ const ZAMStmt ZAMCompiler::ConstructSet(const NameExpr* n, const Expr* e) {
return AddInst(z);
}
const ZAMStmt ZAMCompiler::ConstructRecord(const NameExpr* n, const Expr* e) {
ASSERT(e->Tag() == EXPR_RECORD_CONSTRUCTOR);
auto rc = static_cast<const RecordConstructorExpr*>(e);
auto rt = e->GetType()->AsRecordType();
const ZAMStmt ZAMCompiler::ConstructRecord(const NameExpr* n, const Expr* e, bool is_from_rec) {
auto rec_e = is_from_rec ? e->GetOp1().get() : e;
ASSERT(rec_e->Tag() == EXPR_RECORD_CONSTRUCTOR);
auto rc = static_cast<const RecordConstructorExpr*>(rec_e);
auto rt = rec_e->GetType()->AsRecordType();
auto aux = InternalBuildVals(rc->Op().get());
@ -1266,7 +1341,7 @@ const ZAMStmt ZAMCompiler::ConstructRecord(const NameExpr* n, const Expr* e) {
// constructor.
aux->zvec.resize(rt->NumFields());
if ( pfs->HasSideEffects(SideEffectsOp::CONSTRUCTION, e->GetType()) )
if ( pfs->HasSideEffects(SideEffectsOp::CONSTRUCTION, rec_e->GetType()) )
aux->can_change_non_locals = true;
ZOp op;
@ -1331,33 +1406,89 @@ const ZAMStmt ZAMCompiler::ConstructRecord(const NameExpr* n, const Expr* e) {
else
op = OP_CONSTRUCT_DIRECT_RECORD_V;
ZInstI z = network_time_index >= 0 ? GenInst(op, n, network_time_index) : GenInst(op, n);
ZInstI z;
if ( is_from_rec ) {
// Map non-from-rec operand to the from-rec equivalent.
switch ( op ) {
case OP_CONSTRUCT_KNOWN_RECORD_WITH_NT_VV: op = OP_CONSTRUCT_KNOWN_RECORD_WITH_NT_FROM_VVV; break;
case OP_CONSTRUCT_KNOWN_RECORD_V: op = OP_CONSTRUCT_KNOWN_RECORD_FROM_VV; break;
case OP_CONSTRUCT_KNOWN_RECORD_WITH_INITS_AND_NT_VV:
op = OP_CONSTRUCT_KNOWN_RECORD_WITH_INITS_AND_NT_FROM_VVV;
break;
case OP_CONSTRUCT_KNOWN_RECORD_WITH_INITS_V:
op = OP_CONSTRUCT_KNOWN_RECORD_WITH_INITS_FROM_VV;
break;
// Note, no case for OP_CONSTRUCT_DIRECT_RECORD_V - shouldn't
// happen given how we construct ConstructFromRecordExpr's.
default: reporter->InternalError("bad op in ZAMCompiler::ConstructRecord");
}
auto cfr = static_cast<const ConstructFromRecordExpr*>(e);
auto from_n = cfr->GetOp2()->AsNameExpr();
if ( network_time_index >= 0 )
z = GenInst(op, n, from_n, network_time_index);
else
z = GenInst(op, n, from_n);
aux->lhs_map = cfr->LHSMap();
aux->rhs_map = cfr->RHSMap();
for ( auto i : aux->lhs_map ) {
auto& field_t = rt->GetFieldType(i);
aux->is_managed.push_back(ZVal::IsManagedType(field_t));
}
}
else
z = network_time_index >= 0 ? GenInst(op, n, network_time_index) : GenInst(op, n);
z.aux = aux;
z.t = e->GetType();
z.t = rec_e->GetType();
auto inst = AddInst(z);
// If one of the initialization values is an unspecified vector (which
// in general we can't know until run-time) then we'll need to
// "concretize" it. We first see whether this is a possibility, since
// it usually isn't, by counting up how many of the record fields are
// vectors.
std::vector<int> vector_fields; // holds indices of the vector fields
// it usually isn't, by counting up how many of the initialized record
// fields are vectors.
// First just gather up the types of all the fields, and their location
// in the target.
std::vector<std::pair<TypePtr, int>> init_field_types;
for ( int i = 0; i < z.aux->n; ++i ) {
auto field_ind = map ? (*map)[i] : i;
auto& field_t = rt->GetFieldType(field_ind);
if ( field_t->Tag() == TYPE_VECTOR && field_t->Yield()->Tag() != TYPE_ANY )
vector_fields.push_back(field_ind);
init_field_types.emplace_back(field_t, field_ind);
}
if ( is_from_rec )
// Need to also check the source record.
for ( auto i : aux->lhs_map ) {
auto& field_t = rt->GetFieldType(i);
init_field_types.emplace_back(field_t, i);
}
// Now spin through to find the vector fields.
std::vector<int> vector_fields; // holds indices of the vector fields
for ( auto& ft : init_field_types )
if ( ft.first->Tag() == TYPE_VECTOR && ft.first->Yield()->Tag() != TYPE_ANY )
vector_fields.push_back(ft.second);
if ( vector_fields.empty() )
// Common case of no vector fields, we're done.
return inst;
// Need to add a separate instruction for concretizing the fields.
z = GenInst(OP_CONCRETIZE_VECTOR_FIELDS_V, n);
z.t = e->GetType();
z.t = rec_e->GetType();
int nf = static_cast<int>(vector_fields.size());
z.aux = new ZInstAux(nf);
z.aux->elems_has_slots = false; // we're storing field offsets, not slots

View file

@ -51,9 +51,9 @@ public:
if ( lv < 0 )
continue;
auto& var = frame[lv];
if ( aux->lvt_is_managed[i] )
if ( aux->is_managed[i] )
ZVal::DeleteManagedType(var);
auto& t = aux->loop_var_types[i];
auto& t = aux->types[i];
var = ZVal(ind_lv_p, t);
}

View file

@ -1230,6 +1230,8 @@ eval ConstructTableOrSetPre()
direct-unary-op Record-Constructor ConstructRecord
direct-unary-op Rec-Construct-With-Rec ConstructRecordFromRecord
macro ConstructRecordPost()
auto& r = frame[z.v1].record_val;
Unref(r);
@ -1245,29 +1247,150 @@ type V
eval auto init_vals = z.aux->ToZValVecWithMap(frame);
ConstructRecordPost()
macro AssignFromRec()
/* The following is defined below, for use by Rec-Assign-Fields */
SetUpRecFieldOps(lhs_map)
auto is_managed = aux->is_managed;
for ( size_t i = 0U; i < n; ++i )
{
auto rhs_i = rhs->RawField(rhs_map[i]);
if ( is_managed[i] )
zeek::Ref(rhs_i.ManagedVal());
init_vals[lhs_map[i]] = rhs_i;
}
op Construct-Known-Record-From
type VV
eval auto init_vals = z.aux->ToZValVecWithMap(frame);
AssignFromRec()
ConstructRecordPost()
macro DoNetworkTimeInit(slot)
init_vals[slot] = ZVal(run_state::network_time);
op Construct-Known-Record-With-NT
type VV
eval auto init_vals = z.aux->ToZValVecWithMap(frame);
ASSERT(! init_vals[z.v2]);
init_vals[z.v2] = ZVal(run_state::network_time);
DoNetworkTimeInit(z.v2)
ConstructRecordPost()
op Construct-Known-Record-With-NT-From
type VVV
eval auto init_vals = z.aux->ToZValVecWithMap(frame);
DoNetworkTimeInit(z.v3)
AssignFromRec()
ConstructRecordPost()
macro GenInits()
auto init_vals = z.aux->ToZValVecWithMap(frame);
for ( auto& fi : *z.aux->field_inits )
init_vals[fi.first] = fi.second->Generate();
op Construct-Known-Record-With-Inits
type V
eval auto init_vals = z.aux->ToZValVecWithMap(frame);
for ( auto& fi : *z.aux->field_inits )
init_vals[fi.first] = fi.second->Generate();
eval GenInits()
ConstructRecordPost()
op Construct-Known-Record-With-Inits-From
type VV
eval GenInits()
AssignFromRec()
ConstructRecordPost()
op Construct-Known-Record-With-Inits-And-NT
type VV
eval auto init_vals = z.aux->ToZValVecWithMap(frame);
for ( auto& fi : *z.aux->field_inits )
init_vals[fi.first] = fi.second->Generate();
ASSERT(! init_vals[z.v2]);
init_vals[z.v2] = ZVal(run_state::network_time);
eval GenInits()
DoNetworkTimeInit(z.v2)
ConstructRecordPost()
op Construct-Known-Record-With-Inits-And-NT-From
type VVV
eval GenInits()
DoNetworkTimeInit(z.v3)
AssignFromRec()
ConstructRecordPost()
macro SetUpRecFieldOps(which_lhs_map)
auto lhs = frame[z.v1].record_val;
auto rhs = frame[z.v2].record_val;
auto aux = z.aux;
auto& lhs_map = aux->which_lhs_map;
auto& rhs_map = aux->rhs_map;
auto n = rhs_map.size();
op Rec-Assign-Fields
op1-read
type VV
eval SetUpRecFieldOps(map)
for ( size_t i = 0U; i < n; ++i )
lhs->RawOptField(lhs_map[i]) = rhs->RawField(rhs_map[i]);
macro DoManagedRecAssign()
auto is_managed = aux->is_managed;
for ( size_t i = 0U; i < n; ++i )
if ( is_managed[i] )
{
auto& lhs_i = lhs->RawOptField(lhs_map[i]);
auto rhs_i = rhs->RawField(rhs_map[i]);
zeek::Ref(rhs_i.ManagedVal());
if ( lhs_i )
ZVal::DeleteManagedType(*lhs_i);
lhs_i = rhs_i;
}
else
lhs->RawOptField(lhs_map[i]) = rhs->RawField(rhs_map[i]);
op Rec-Assign-Fields-Managed
op1-read
type VV
eval SetUpRecFieldOps(map)
DoManagedRecAssign()
op Rec-Assign-Fields-All-Managed
op1-read
type VV
eval SetUpRecFieldOps(map)
for ( size_t i = 0U; i < n; ++i )
{
auto& lhs_i = lhs->RawOptField(lhs_map[i]);
auto rhs_i = rhs->RawField(rhs_map[i]);
zeek::Ref(rhs_i.ManagedVal());
if ( lhs_i )
ZVal::DeleteManagedType(*lhs_i);
lhs_i = rhs_i;
}
op Rec-Add-Int-Fields
op1-read
type VV
eval SetUpRecFieldOps(map)
for ( size_t i = 0U; i < n; ++i )
lhs->RawField(lhs_map[i]).int_val += rhs->RawField(rhs_map[i]).int_val;
op Rec-Add-Double-Fields
op1-read
type VV
eval SetUpRecFieldOps(map)
for ( size_t i = 0U; i < n; ++i )
lhs->RawField(lhs_map[i]).double_val += rhs->RawField(rhs_map[i]).double_val;
op Rec-Add-Fields
op1-read
type VV
eval SetUpRecFieldOps(map)
auto& types = aux->types;
for ( size_t i = 0U; i < n; ++i )
{
auto& lhs_i = lhs->RawField(lhs_map[i]);
auto rhs_i = rhs->RawField(rhs_map[i]);
auto tag = types[i]->Tag();
if ( tag == TYPE_INT )
lhs_i.int_val += rhs_i.int_val;
else if ( tag == TYPE_COUNT )
lhs_i.uint_val += rhs_i.uint_val;
else
lhs_i.double_val += rhs_i.double_val;
}
# Special instruction for concretizing vectors that are fields in a
# newly-constructed record. "aux" holds which fields in the record to
# inspect.

View file

@ -711,8 +711,8 @@ const ZAMStmt ZAMCompiler::LoopOverTable(const ForStmt* f, const NameExpr* val)
int slot = id->IsBlank() ? -1 : FrameSlot(id);
aux->loop_vars.push_back(slot);
auto& t = id->GetType();
aux->loop_var_types.push_back(t);
aux->lvt_is_managed.push_back(ZVal::IsManagedType(t));
aux->types.push_back(t);
aux->is_managed.push_back(ZVal::IsManagedType(t));
}
bool no_loop_vars = (num_unused == loop_vars->length());

View file

@ -484,20 +484,34 @@ public:
// store here.
bool can_change_non_locals = false;
// The following is used for constructing records, to map elements in
// slots/constants/types to record field offsets.
// The following is used for constructing records or in record chain
// operations, to map elements in slots/constants/types to record field
// offsets.
std::vector<int> map;
// The following is used when we need two maps, a LHS one (done with
// the above) and a RHS one.
std::vector<int> rhs_map;
// ... and the following when we need *three* (for constructing certain
// types of records). We could hack it in by adding onto "map" but
// this is cleaner, and we're not really concerned with the size of
// ZAM auxiliary information as it's not that commonly used, and doesn't
// grow during execution.
std::vector<int> lhs_map;
// For operations that need to track types corresponding to other vectors.
std::vector<TypePtr> types;
// For operations that mix managed and unmanaged assignments.
std::vector<bool> is_managed;
///// The following four apply to looping over the elements of tables.
// Frame slots of iteration variables, such as "[v1, v2, v3] in aggr".
// A negative value means "skip assignment".
std::vector<int> loop_vars;
// Their types and whether they're managed.
std::vector<TypePtr> loop_var_types;
std::vector<bool> lvt_is_managed;
// Type associated with the "value" entry, for "k, value in aggr"
// iteration.
TypePtr value_var_type;

View file

@ -0,0 +1,3 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
[a=-42.3, b=-12, c=3, d=3, e=-10012, f=1003.0, g=tail]
[a=-84.6, b=-24, c=1006, d=1006, e=-20024, f=-9039.3, g=intervening]

View file

@ -1,2 +1,2 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
fatal error: no matching functions/files for C++ compilation
fatal error: no matching functions/files for script optimization

View file

@ -1,2 +1,2 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
fatal error: no matching functions/files for C++ compilation
fatal error: no matching functions/files for script optimization

View file

@ -1,2 +1,2 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
fatal error: no matching functions/files for C++ compilation
fatal error: no matching functions/files for script optimization

View file

@ -0,0 +1,47 @@
# @TEST-DOC: Test for correct ZAM optimization of record "chains".
#
# @TEST-EXEC: zeek -b -O ZAM %INPUT >output
# @TEST-EXEC: btest-diff output
type R: record {
a: count;
b: int;
c: double;
};
type Rev_R: record {
a: double;
b: int;
c: count;
d: count;
e: int;
f: double;
g: string;
};
global r1 = R($a = 3, $b = -12, $c = -42.3);
global r2 = R($a = 1003, $b = -10012, $c = -10042.3);
global r3: Rev_R;
r3$a = r1$c;
r3$b = r1$b;
r3$c = r1$a;
r3$d = r1$a;
r3$e = r2$b;
r3$f = r2$a;
r3$g = "tail";
print r3;
r3$a += r1$c;
r3$b += r1$b;
r3$g = "intervening";
r3$c += r2$a;
r3$d += r2$a;
r3$e += r2$b;
r3$f += r2$c;
print r3;