Merge remote-tracking branch 'remotes/origin/feature_arrange_with_libnest2d'
commit 36c1483ec5
@@ -82,8 +82,8 @@ if(LIBNEST2D_OPTIMIZER_BACKEND STREQUAL "nlopt")
${CMAKE_CURRENT_SOURCE_DIR}/libnest2d/optimizers/genetic.hpp
${CMAKE_CURRENT_SOURCE_DIR}/libnest2d/optimizers/nlopt_boilerplate.hpp)
list(APPEND LIBNEST2D_LIBRARIES ${NLopt_LIBS}
# Threads::Threads
)
# Threads::Threads
)
list(APPEND LIBNEST2D_HEADERS ${NLopt_INCLUDE_DIR})
endif()
@@ -84,7 +84,8 @@ void arrangeRectangles() {
// {{0, 0}, {0, 20*SCALE}, {10*SCALE, 0}, {0, 0}}
// };

std::vector<Item> crasher = {
std::vector<Item> crasher =
{
{
{-5000000, 8954050},
{5000000, 8954050},
@@ -527,12 +528,12 @@ void arrangeRectangles() {
};

std::vector<Item> input;
// input.insert(input.end(), prusaParts().begin(), prusaParts().end());
input.insert(input.end(), prusaParts().begin(), prusaParts().end());
// input.insert(input.end(), prusaExParts().begin(), prusaExParts().end());
// input.insert(input.end(), stegoParts().begin(), stegoParts().end());
// input.insert(input.end(), rects.begin(), rects.end());
// input.insert(input.end(), proba.begin(), proba.end());
input.insert(input.end(), crasher.begin(), crasher.end());
// input.insert(input.end(), crasher.begin(), crasher.end());

Box bin(250*SCALE, 210*SCALE);
@@ -545,18 +546,18 @@ void arrangeRectangles() {

Packer::PlacementConfig pconf;
pconf.alignment = Placer::Config::Alignment::CENTER;
pconf.starting_point = Placer::Config::Alignment::CENTER;
pconf.rotations = {0.0/*, Pi/2.0, Pi, 3*Pi/2*/};
pconf.object_function = [&bin](Placer::Pile pile, double area,
double norm, double penality) {

auto bb = ShapeLike::boundingBox(pile);

double diameter = PointLike::distance(bb.minCorner(),
bb.maxCorner());

// We will optimize to the diameter of the circle around the bounding
// box and use the norming factor to get rid of the physical dimensions
double score = diameter / norm;
auto& sh = pile.back();
auto rv = Nfp::referenceVertex(sh);
auto c = bin.center();
auto d = PointLike::distance(rv, c);
double score = double(d)/norm;

// If it does not fit into the print bed we will beat it
// with a large penality
@@ -568,7 +569,9 @@ void arrangeRectangles() {
Packer::SelectionConfig sconf;
// sconf.allow_parallel = false;
// sconf.force_parallel = false;
// sconf.try_triplets = true;
// sconf.try_reverse_order = true;
// sconf.waste_increment = 0.1;

arrange.configure(pconf, sconf);
@@ -26,11 +26,20 @@ struct NfpPConfig {
/// Where to align the resulting packed pile
Alignment alignment;

Alignment starting_point;

std::function<double(const Nfp::Shapes<RawShape>&, double, double, double)>
object_function;

/**
* @brief The quality of search for an optimal placement.
* This is a compromise slider between quality and speed. Zero is the
* fast and poor solution while 1.0 is the slowest but most accurate.
*/
float accuracy = 1.0;

NfpPConfig(): rotations({0.0, Pi/2.0, Pi, 3*Pi/2}),
alignment(Alignment::CENTER) {}
alignment(Alignment::CENTER), starting_point(Alignment::CENTER) {}
};

// A class for getting a point on the circumference of the polygon (in log time)
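
The accuracy field documented above is a 0..1 quality/speed slider; this hunk only declares it and does not show how it is consumed. Purely as an illustration of how such a knob is typically wired up (the mapping and the constants below are assumptions, not libnest2d code), it would scale the search effort, for example the number of candidate placements evaluated:

    #include <algorithm>
    #include <cstdio>

    // Illustrative only: map a 0..1 accuracy knob onto a sampling budget.
    // MIN_SAMPLES/MAX_SAMPLES are made-up numbers, not taken from libnest2d.
    unsigned samplesFor(float accuracy) {
        constexpr unsigned MIN_SAMPLES = 8;     // cheapest, coarsest search
        constexpr unsigned MAX_SAMPLES = 128;   // slowest, finest search
        accuracy = std::clamp(accuracy, 0.0f, 1.0f);
        return MIN_SAMPLES +
               static_cast<unsigned>(accuracy * (MAX_SAMPLES - MIN_SAMPLES));
    }

    int main() {
        std::printf("accuracy 0.0 -> %u samples\n", samplesFor(0.0f));  // 8
        std::printf("accuracy 1.0 -> %u samples\n", samplesFor(1.0f));  // 128
    }
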
@@ -39,14 +48,6 @@ template<class RawShape> class EdgeCache {
using Coord = TCoord<Vertex>;
using Edge = _Segment<Vertex>;

// enum Corners {
// BOTTOM,
// LEFT,
// RIGHT,
// TOP,
// NUM_CORNERS
// };

mutable std::vector<double> corners_;

std::vector<Edge> emap_;
@@ -70,49 +71,9 @@ template<class RawShape> class EdgeCache {
void fetchCorners() const {
if(!corners_.empty()) return;

// TODO Accuracy
corners_ = distances_;
for(auto& d : corners_) {
d /= full_distance_;
}

// corners_ = std::vector<double>(NUM_CORNERS, 0.0);

// std::vector<unsigned> idx_ud(emap_.size(), 0);
// std::vector<unsigned> idx_lr(emap_.size(), 0);

// std::iota(idx_ud.begin(), idx_ud.end(), 0);
// std::iota(idx_lr.begin(), idx_lr.end(), 0);

// std::sort(idx_ud.begin(), idx_ud.end(),
// [this](unsigned idx1, unsigned idx2)
// {
// const Vertex& v1 = emap_[idx1].first();
// const Vertex& v2 = emap_[idx2].first();

// auto diff = getY(v1) - getY(v2);
// if(std::abs(diff) <= std::numeric_limits<Coord>::epsilon())
// return getX(v1) < getX(v2);

// return diff < 0;
// });

// std::sort(idx_lr.begin(), idx_lr.end(),
// [this](unsigned idx1, unsigned idx2)
// {
// const Vertex& v1 = emap_[idx1].first();
// const Vertex& v2 = emap_[idx2].first();

// auto diff = getX(v1) - getX(v2);
// if(std::abs(diff) <= std::numeric_limits<Coord>::epsilon())
// return getY(v1) < getY(v2);

// return diff < 0;
// });

// corners_[BOTTOM] = distances_[idx_ud.front()]/full_distance_;
// corners_[TOP] = distances_[idx_ud.back()]/full_distance_;
// corners_[LEFT] = distances_[idx_lr.front()]/full_distance_;
// corners_[RIGHT] = distances_[idx_lr.back()]/full_distance_;
for(auto& d : corners_) d /= full_distance_;
}

public:
@@ -167,12 +128,6 @@ public:

inline double circumference() const BP2D_NOEXCEPT { return full_distance_; }

// inline double corner(Corners c) const BP2D_NOEXCEPT {
// assert(c < NUM_CORNERS);
// fetchCorners();
// return corners_[c];
// }

inline const std::vector<double>& corners() const BP2D_NOEXCEPT {
fetchCorners();
return corners_;
@@ -400,7 +355,7 @@ public:

opt::StopCriteria stopcr;
stopcr.max_iterations = 1000;
stopcr.stoplimit = 0.01;
stopcr.stoplimit = 0.001;
stopcr.type = opt::StopLimitType::RELATIVE;
opt::TOptimizer<opt::Method::L_SIMPLEX> solver(stopcr);
@@ -518,11 +473,37 @@ private:

void setInitialPosition(Item& item) {
Box&& bb = item.boundingBox();
Vertex ci, cb;

Vertex ci = bb.minCorner();
Vertex cb = bin_.minCorner();
switch(config_.starting_point) {
case Config::Alignment::CENTER: {
ci = bb.center();
cb = bin_.center();
break;
}
case Config::Alignment::BOTTOM_LEFT: {
ci = bb.minCorner();
cb = bin_.minCorner();
break;
}
case Config::Alignment::BOTTOM_RIGHT: {
ci = {getX(bb.maxCorner()), getY(bb.minCorner())};
cb = {getX(bin_.maxCorner()), getY(bin_.minCorner())};
break;
}
case Config::Alignment::TOP_LEFT: {
ci = {getX(bb.minCorner()), getY(bb.maxCorner())};
cb = {getX(bin_.minCorner()), getY(bin_.maxCorner())};
break;
}
case Config::Alignment::TOP_RIGHT: {
ci = bb.maxCorner();
cb = bin_.maxCorner();
break;
}
}

auto&& d = cb - ci;
auto d = cb - ci;
item.translate(d);
}
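
A quick worked example of the translation computed above: for starting_point == CENTER the item's bounding-box center is moved onto the bin center, using the 250x210 (scaled) bin declared in the test program. The Pt type and the SCALE value below are stand-ins so the arithmetic can be run on its own; they are not the libnest2d Vertex/Box types.

    #include <cstdio>

    // Minimal stand-ins for the example; not the libnest2d geometry types.
    struct Pt { long long x, y; };
    static Pt sub(Pt a, Pt b) { return {a.x - b.x, a.y - b.y}; }

    int main() {
        const long long SCALE = 1000000;              // assumed value of the test's SCALE macro
        Pt bin_center  = {125 * SCALE, 105 * SCALE};  // center of a 250x210 bin at the origin
        Pt item_center = {10 * SCALE, 20 * SCALE};    // bounding-box center of some item

        // d = cb - ci, the same translation setInitialPosition() applies
        Pt d = sub(bin_center, item_center);
        std::printf("translate by (%lld, %lld)\n", d.x, d.y); // (115000000, 85000000)
    }
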
@@ -41,11 +41,24 @@ public:
struct Config {

/**
* If true, the algorithm will try to place pair and driplets in all
* possible order.
* If true, the algorithm will try to place pair and triplets in all
* possible order. It will have a hugely negative impact on performance.
*/
bool try_reverse_order = true;

/**
* @brief try_pairs Whether to try pairs of items to pack. It will add
* a quadratic component to the complexity.
*/
bool try_pairs = true;

/**
* @brief Whether to try groups of 3 items to pack. This could be very
* slow for large number of items (>100) as it adds a cubic component
* to the complexity.
*/
bool try_triplets = false;

/**
* The initial fill proportion of the bin area that will be filled before
* trying items one by one, or pairs or triplets.
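
The quadratic/cubic remarks in the doc comments above are simply the pair and triplet counts. A quick way to see why try_triplets defaults to off is to print those counts for the >100 item figure the comment mentions; this is standalone arithmetic, nothing libnest2d-specific:

    #include <cstdio>

    int main() {
        unsigned long long n = 100;                               // items on the plate
        unsigned long long pairs    = n * (n - 1) / 2;            // candidate pairs    ~ O(n^2)
        unsigned long long triplets = n * (n - 1) * (n - 2) / 6;  // candidate triplets ~ O(n^3)
        std::printf("n=%llu  pairs=%llu  triplets=%llu\n", n, pairs, triplets);
        // n=100  pairs=4950  triplets=161700
    }
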
@@ -151,8 +164,8 @@ public:
return std::any_of(wrong_pairs.begin(), wrong_pairs.end(),
[&i1, &i2](const TPair& pair)
{
Item& pi1 = std::get<0>(pair), pi2 = std::get<1>(pair);
Item& ri1 = i1, ri2 = i2;
Item& pi1 = std::get<0>(pair), &pi2 = std::get<1>(pair);
Item& ri1 = i1, &ri2 = i2;
return (&pi1 == &ri1 && &pi2 == &ri2) ||
(&pi1 == &ri2 && &pi2 == &ri1);
});
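
The change in this hunk is a C++ declarator subtlety: in "Item& pi1 = ..., pi2 = ...;" the & belongs to pi1 only, so pi2 was a copy, and the address comparisons below it could never match the stored pair. A standalone demonstration of the difference (toy Item type, not the library's):

    #include <cstdio>

    struct Item { int id; };

    int main() {
        Item a{1}, b{2};

        Item& r1 = a, r2 = b;    // r2 is a COPY of b, not a reference (the bug)
        Item& s1 = a, &s2 = b;   // s2 is a reference to b (the fix)

        std::printf("&r2 == &b ? %d\n", &r2 == &b); // 0: identity check fails
        std::printf("&s2 == &b ? %d\n", &s2 == &b); // 1: identity check works
        (void)r1; (void)s1;
    }
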
@@ -172,7 +185,7 @@ public:
Item& pi1 = std::get<0>(tripl);
Item& pi2 = std::get<1>(tripl);
Item& pi3 = std::get<2>(tripl);
Item& ri1 = i1, ri2 = i2, ri3 = i3;
Item& ri1 = i1, &ri2 = i2, &ri3 = i3;
return (&pi1 == &ri1 && &pi2 == &ri2 && &pi3 == &ri3) ||
(&pi1 == &ri1 && &pi2 == &ri3 && &pi3 == &ri2) ||
(&pi1 == &ri2 && &pi2 == &ri1 && &pi3 == &ri3) ||
@@ -348,6 +361,10 @@ public:
// Will be true if a succesfull pack can be made.
bool ret = false;

auto area = [](const ItemListIt& it) {
return it->get().area();
};

while (it != endit && !ret) { // drill down 1st level

// We need to determine in each iteration the largest, second
@@ -361,7 +378,7 @@ public:

// Check if there is enough free area for the item and the two
// largest item
if(free_area - it->get().area() - area_of_two_largest > waste)
if(free_area - area(it) - area_of_two_largest > waste)
break;

// Determine the area of the two smallest item.
@@ -373,7 +390,7 @@ public:
double area_of_two_smallest =
smallest.area() + second_smallest.area();

if(it->get().area() + area_of_two_smallest > free_area) {
if(area(it) + area_of_two_smallest > free_area) {
it++; continue;
}
@@ -384,16 +401,18 @@ public:

it2 = not_packed.begin();
double rem2_area = free_area - largest.area();
double a2_sum = it->get().area() + it2->get().area();
double a2_sum = 0;

while(it2 != endit && !ret &&
rem2_area - a2_sum <= waste) { // Drill down level 2
rem2_area - (a2_sum = area(it) + area(it2)) <= waste) {
// Drill down level 2

if(a2_sum != area(it) + area(it2)) throw -1;

if(it == it2 || check_pair(wrong_pairs, *it, *it2)) {
it2++; continue;
}

a2_sum = it->get().area() + it2->get().area();
if(a2_sum + smallest.area() > free_area) {
it2++; continue;
}
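
The rewrite above refreshes a2_sum as part of the loop condition via an assignment expression; because && short-circuits, the sum is only evaluated after "it2 != endit && !ret" has held, so the iterators are dereferenceable whenever area() runs (the "throw -1" line reads like a temporary sanity check for exactly that). A standalone illustration of the pattern, with a toy container instead of the item list:

    #include <cstdio>
    #include <list>

    int main() {
        std::list<double> areas = {4.0, 3.0, 2.0};
        const double limit = 9.0;

        double sum = 0;
        auto it = areas.begin();
        // 'sum' is recomputed inside the condition; short-circuiting guarantees
        // it is only evaluated while 'it' is a valid (dereferenceable) iterator.
        while (it != areas.end() && (sum = *it + areas.front()) <= limit) {
            std::printf("considering element %.1f, sum=%.1f\n", *it, sum);
            ++it;
        }
    }
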
@@ -429,14 +448,13 @@ public:
// The 'smallest' variable now could be identical with
// it2 but we don't bother with that

if(!can_pack2) { it2++; continue; }

it3 = not_packed.begin();

double a3_sum = a2_sum + it3->get().area();
double a3_sum = 0;

while(it3 != endit && !ret &&
free_area - a3_sum <= waste) { // 3rd level
free_area - (a3_sum = a2_sum + area(it3)) <= waste) {
// 3rd level

if(it3 == it || it3 == it2 ||
check_triplet(wrong_triplets, *it, *it2, *it3))
@@ -560,8 +578,11 @@ public:

if(do_parallel) dout() << "Parallel execution..." << "\n";

bool do_pairs = config_.try_pairs;
bool do_triplets = config_.try_triplets;

// The DJD heuristic algorithm itself:
auto packjob = [INITIAL_FILL_AREA, bin_area, w,
auto packjob = [INITIAL_FILL_AREA, bin_area, w, do_triplets, do_pairs,
&tryOneByOne,
&tryGroupsOfTwo,
&tryGroupsOfThree,
@@ -573,7 +594,7 @@ public:
double waste = .0;
bool lasttry = false;

while(!not_packed.empty() ) {
while(!not_packed.empty()) {

{// Fill the bin up to INITIAL_FILL_PROPORTION of its capacity
auto it = not_packed.begin();
@@ -594,26 +615,25 @@ public:
// try pieses one by one
while(tryOneByOne(placer, not_packed, waste, free_area,
filled_area)) {
if(lasttry) std::cout << "Lasttry monopack" << std::endl;
waste = 0; lasttry = false;
makeProgress(placer, idx, 1);
}

// try groups of 2 pieses
while(tryGroupsOfTwo(placer, not_packed, waste, free_area,
while(do_pairs &&
tryGroupsOfTwo(placer, not_packed, waste, free_area,
filled_area)) {
if(lasttry) std::cout << "Lasttry bipack" << std::endl;
waste = 0; lasttry = false;
makeProgress(placer, idx, 2);
}

// // try groups of 3 pieses
// while(tryGroupsOfThree(placer, not_packed, waste, free_area,
// filled_area)) {
// if(lasttry) std::cout << "Lasttry tripack" << std::endl;
// waste = 0; lasttry = false;
// makeProgress(placer, idx, 3);
// }
// try groups of 3 pieses
while(do_triplets &&
tryGroupsOfThree(placer, not_packed, waste, free_area,
filled_area)) {
waste = 0; lasttry = false;
makeProgress(placer, idx, 3);
}

waste += w;
if(!lasttry && waste > free_area) lasttry = true;
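
For orientation, the control flow these hunks gate with do_pairs/do_triplets is the DJD waste loop: keep trying single items, then pairs, then triplets, and when nothing fits, raise the tolerated "waste" by a small increment (cf. the commented sconf.waste_increment earlier) and go around again. A schematic, self-contained version of that outer loop; the stand-in tryStep() replaces tryOneByOne/tryGroupsOfTwo/tryGroupsOfThree and is not the library code:

    #include <cstdio>

    // Stand-in "try to place something" step; returns true when it packed an item.
    static bool tryStep(int& remaining) {
        if (remaining <= 0) return false;
        --remaining;
        return true;
    }

    int main() {
        int remaining = 5;              // items still waiting for a spot
        double waste = 0.0;             // tolerated wasted bin area
        const double w = 0.1;           // waste increment per round
        const double free_area = 1.0;   // normalized free area of the current bin
        bool do_pairs = true, do_triplets = false;

        while (remaining > 0) {
            while (tryStep(remaining)) waste = 0;                // one-by-one
            while (do_pairs && tryStep(remaining)) waste = 0;    // pairs
            while (do_triplets && tryStep(remaining)) waste = 0; // triplets

            waste += w;                       // nothing fit: tolerate more waste
            if (waste > free_area) break;     // give up on this bin (schematic)
        }
        std::printf("items left unpacked: %d\n", remaining);
    }
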
@@ -55,6 +55,18 @@ public:
this->progress_(--total);
};

// Safety test: try to pack each item into an empty bin. If it fails
// then it should be removed from the not_packed list
{ auto it = store_.begin();
while (it != store_.end()) {
Placer p(bin);
if(!p.pack(*it)) {
auto itmp = it++;
store_.erase(itmp);
} else it++;
}
}

for(auto& item : store_ ) {
bool was_packed = false;
while(!was_packed) {
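
The safety-test block above uses the classic erase-while-iterating idiom for node-based containers: save the current position, post-increment, then erase the saved iterator, so the live iterator is never invalidated. A standalone demonstration with std::list (the store is assumed to be a node-based container here; for a std::vector one would use "it = store.erase(it)" instead):

    #include <cstdio>
    #include <list>

    int main() {
        std::list<int> store = {1, 2, 3, 4, 5};

        // Remove elements that fail a check without invalidating the loop iterator.
        auto it = store.begin();
        while (it != store.end()) {
            bool packs = (*it % 2 == 0);   // stand-in for p.pack(*it)
            if (!packs) {
                auto itmp = it++;
                store.erase(itmp);
            } else {
                ++it;
            }
        }

        for (int v : store) std::printf("%d ", v);  // 2 4
        std::printf("\n");
    }
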
@@ -19,7 +19,6 @@
#include <boost/nowide/iostream.hpp>
#include <boost/algorithm/string/replace.hpp>

// #include <benchmark.h>
#include "SVG.hpp"
#include <Eigen/Dense>
@@ -309,7 +308,7 @@ namespace arr {

using namespace libnest2d;

std::string toString(const Model& model) {
std::string toString(const Model& model, bool holes = true) {
std::stringstream ss;

ss << "{\n";
@@ -348,17 +347,17 @@ std::string toString(const Model& model) {

// Holes:
ss << "\t\t{\n";
// for(auto h : expoly.holes) {
// ss << "\t\t\t{\n";
// for(auto v : h.points) ss << "\t\t\t\t{"
// << v.x << ", "
// << v.y << "},\n";
// {
// auto v = h.points.front();
// ss << "\t\t\t\t{" << v.x << ", " << v.y << "},\n";
// }
// ss << "\t\t\t},\n";
// }
if(holes) for(auto h : expoly.holes) {
ss << "\t\t\t{\n";
for(auto v : h.points) ss << "\t\t\t\t{"
<< v.x << ", "
<< v.y << "},\n";
{
auto v = h.points.front();
ss << "\t\t\t\t{" << v.x << ", " << v.y << "},\n";
}
ss << "\t\t\t},\n";
}
ss << "\t\t},\n";

ss << "\t},\n";
@@ -477,58 +476,21 @@ bool arrange(Model &model, coordf_t dist, const Slic3r::BoundingBoxf* bb,
// Create the arranger config
auto min_obj_distance = static_cast<Coord>(dist/SCALING_FACTOR);

// Benchmark bench;

// std::cout << "Creating model siluett..." << std::endl;

// bench.start();
// Get the 2D projected shapes with their 3D model instance pointers
auto shapemap = arr::projectModelFromTop(model);
// bench.stop();

// std::cout << "Model siluett created in " << bench.getElapsedSec()
// << " seconds. " << "Min object distance = " << min_obj_distance << std::endl;

// std::cout << "{" << std::endl;
// std::for_each(shapemap.begin(), shapemap.end(),
// [] (ShapeData2D::value_type& it)
// {
// std::cout << "\t{" << std::endl;
// Item& item = it.second;
// for(auto& v : item) {
// std::cout << "\t\t" << "{" << getX(v)
// << ", " << getY(v) << "},\n";
// }
// std::cout << "\t}," << std::endl;
// });
// std::cout << "}" << std::endl;
// return true;

bool hasbin = bb != nullptr && bb->defined;
double area_max = 0;
Item *biggest = nullptr;

// Copy the references for the shapes only as the arranger expects a
// sequence of objects convertible to Item or ClipperPolygon
std::vector<std::reference_wrapper<Item>> shapes;
shapes.reserve(shapemap.size());
std::for_each(shapemap.begin(), shapemap.end(),
[&shapes, min_obj_distance, &area_max, &biggest,hasbin]
[&shapes, min_obj_distance, &area_max, hasbin]
(ShapeData2D::value_type& it)
{
if(!hasbin) {
Item& item = it.second;
item.addOffset(min_obj_distance);
auto b = ShapeLike::boundingBox(item.transformedShape());
auto a = b.width()*b.height();
if(area_max < a) {
area_max = static_cast<double>(a);
biggest = &item;
}
}

shapes.push_back(std::ref(it.second));

});

Box bin;
@@ -546,9 +508,6 @@ bool arrange(Model &model, coordf_t dist, const Slic3r::BoundingBoxf* bb,
static_cast<libnest2d::Coord>(bbb.max.x),
static_cast<libnest2d::Coord>(bbb.max.y)
});
} else {
// Just take the biggest item as bin... ?
bin = ShapeLike::boundingBox(biggest->transformedShape());
}

// Will use the DJD selection heuristic with the BottomLeft placement
@@ -563,20 +522,22 @@ bool arrange(Model &model, coordf_t dist, const Slic3r::BoundingBoxf* bb,
// Align the arranged pile into the center of the bin
pcfg.alignment = PConf::Alignment::CENTER;

// Start placing the items from the center of the print bed
pcfg.starting_point = PConf::Alignment::CENTER;

// TODO cannot use rotations until multiple objects of same geometry can
// handle different rotations
// arranger.useMinimumBoundigBoxRotation();
pcfg.rotations = { 0.0 };

// Magic: we will specify what is the goal of arrangement...
// In this case we override the default object function because we
// (apparently) don't care about pack efficiency and all we care is that the
// larger items go into the center of the pile and smaller items orbit it
// so the resulting pile has a circle-like shape.
// This is good for the print bed's heat profile.
// As a side effect, the arrange procedure is a lot faster (we do not need
// to calculate the convex hulls)
pcfg.object_function = [&bin](
// In this case we override the default object to make the larger items go
// into the center of the pile and smaller items orbit it so the resulting
// pile has a circle-like shape. This is good for the print bed's heat
// profile. We alse sacrafice a bit of pack efficiency for this to work. As
// a side effect, the arrange procedure is a lot faster (we do not need to
// calculate the convex hulls)
pcfg.object_function = [bin, hasbin](
NfpPlacer::Pile pile, // The currently arranged pile
double /*area*/, // Sum area of items (not needed)
double norm, // A norming factor for physical dimensions
@@ -584,14 +545,25 @@ bool arrange(Model &model, coordf_t dist, const Slic3r::BoundingBoxf* bb,
{
auto bb = ShapeLike::boundingBox(pile);

// We will optimize to the diameter of the circle around the bounding
// box and use the norming factor to get rid of the physical dimensions
double score = PointLike::distance(bb.minCorner(),
bb.maxCorner()) / norm;
// We get the current item that's being evaluated.
auto& sh = pile.back();

// We retrieve the reference point of this item
auto rv = Nfp::referenceVertex(sh);

// We get the distance of the reference point from the center of the
// heat bed
auto c = bin.center();
auto d = PointLike::distance(rv, c);

// The score will be the normalized distance which will be minimized,
// effectively creating a circle shaped pile of items
double score = double(d)/norm;

// If it does not fit into the print bed we will beat it
// with a large penality
if(!NfpPlacer::wouldFit(bb, bin)) score = 2*penality - score;
// with a large penality. If we would not do this, there would be only
// one big pile that doesn't care whether it fits onto the print bed.
if(hasbin && !NfpPlacer::wouldFit(bb, bin)) score = 2*penality - score;

return score;
};
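
The comments in this hunk describe the whole idea: score each newly placed item by the normalized distance of its reference vertex from the bed center, and push any arrangement that would not fit the bed past the penalty threshold so it loses to any arrangement that does. A self-contained sketch of that scoring rule with toy Point/Box types; the real code uses libnest2d's PointLike/ShapeLike helpers and NfpPlacer::wouldFit, which are not reproduced here:

    #include <cmath>
    #include <cstdio>

    // Toy geometry types for the sketch only.
    struct Pt  { double x, y; };
    struct Box { Pt min, max;
                 Pt center() const { return {(min.x + max.x) / 2, (min.y + max.y) / 2}; } };

    static double dist(Pt a, Pt b) { return std::hypot(a.x - b.x, a.y - b.y); }
    static bool   fits(const Box& piece, const Box& bin) {
        return piece.max.x - piece.min.x <= bin.max.x - bin.min.x &&
               piece.max.y - piece.min.y <= bin.max.y - bin.min.y;
    }

    // Lower is better: normalized distance of the item's reference point from the
    // bed center, pushed past the penalty when the pile's bounding box cannot fit.
    static double objective(Pt ref_vertex, const Box& pile_bb, const Box& bin,
                            double norm, double penality) {
        double score = dist(ref_vertex, bin.center()) / norm;
        if (!fits(pile_bb, bin)) score = 2 * penality - score;
        return score;
    }

    int main() {
        Box bin{{0, 0}, {250, 210}};
        Box pile{{100, 80}, {160, 130}};
        double s = objective({110, 90}, pile, bin, /*norm=*/250, /*penality=*/1.0);
        std::printf("score = %.4f\n", s);  // small value: close to the bed center
    }
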
@@ -602,18 +574,10 @@ bool arrange(Model &model, coordf_t dist, const Slic3r::BoundingBoxf* bb,
// Set the progress indicator for the arranger.
arranger.progressIndicator(progressind);

// std::cout << "Arranging model..." << std::endl;
// bench.start();

// Arrange and return the items with their respective indices within the
// input sequence.
auto result = arranger.arrangeIndexed(shapes.begin(), shapes.end());

// bench.stop();
// std::cout << "Model arranged in " << bench.getElapsedSec()
// << " seconds." << std::endl;

auto applyResult = [&shapemap](ArrangeResult::value_type& group,
Coord batch_offset)
{
@@ -637,8 +601,6 @@ bool arrange(Model &model, coordf_t dist, const Slic3r::BoundingBoxf* bb,
}
};

// std::cout << "Applying result..." << std::endl;
// bench.start();
if(first_bin_only) {
applyResult(result.front(), 0);
} else {
@@ -658,9 +620,6 @@ bool arrange(Model &model, coordf_t dist, const Slic3r::BoundingBoxf* bb,
batch_offset += stride;
}
}
// bench.stop();
// std::cout << "Result applied in " << bench.getElapsedSec()
// << " seconds." << std::endl;

for(auto objptr : model.objects) objptr->invalidate_bounding_box();
@@ -675,16 +634,11 @@ bool Model::arrange_objects(coordf_t dist, const BoundingBoxf* bb,
{
bool ret = false;
if(bb != nullptr && bb->defined) {
// Despite the new arrange is able to run without a specified bin,
// the perl testsuit still fails for this case. For now the safest
// thing to do is to use the new arrange only when a proper bin is
// specified.
ret = arr::arrange(*this, dist, bb, false, progressind);
// std::fstream out("out.cpp", std::fstream::out);
// if(out.good()) {
// out << "const TestData OBJECTS = \n";
// out << arr::toString(*this);
// }
// out.close();
// SVG svg("out.svg");
// arr::toSVG(svg, *this);
// svg.Close();
} else {
// get the (transformed) size of each instance so that we take
// into account their different transformations when packing