Merge branch 'master' of https://github.com/Prusa-Development/PrusaSlicerPrivate into et_opengl_es_rebase
@ -34,6 +34,7 @@ option(SLIC3R_MSVC_PDB "Generate PDB files on MSVC in Release mode" 1)
|
||||
option(SLIC3R_PERL_XS "Compile XS Perl module and enable Perl unit and integration tests" 0)
|
||||
option(SLIC3R_ASAN "Enable ASan on Clang and GCC" 0)
|
||||
option(SLIC3R_UBSAN "Enable UBSan on Clang and GCC" 0)
|
||||
option(SLIC3R_ENABLE_FORMAT_STEP "Enable compilation of STEP file support" 1)
|
||||
# If SLIC3R_FHS is 1 -> SLIC3R_DESKTOP_INTEGRATION is always 0, othrewise variable.
|
||||
CMAKE_DEPENDENT_OPTION(SLIC3R_DESKTOP_INTEGRATION "Allow perfoming desktop integration during runtime" 1 "NOT SLIC3R_FHS" 0)
|
||||
|
||||
|
6
deps/CMakeLists.txt
vendored
@ -75,6 +75,9 @@ function(prusaslicer_add_cmake_project projectname)
|
||||
if (MSVC)
|
||||
set(_gen CMAKE_GENERATOR "${DEP_MSVC_GEN}" CMAKE_GENERATOR_PLATFORM "${DEP_PLATFORM}")
|
||||
set(_build_j "/m")
|
||||
if (${projectname} STREQUAL "OCCT")
|
||||
set(_build_j "/m:1")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
ExternalProject_Add(
|
||||
@ -188,7 +191,9 @@ endif ()
|
||||
|
||||
include(JPEG/JPEG.cmake)
|
||||
include(TIFF/TIFF.cmake)
|
||||
include(NanoSVG/NanoSVG.cmake)
|
||||
include(wxWidgets/wxWidgets.cmake)
|
||||
include(OCCT/OCCT.cmake)
|
||||
|
||||
set(_dep_list
|
||||
dep_Boost
|
||||
@ -201,6 +206,7 @@ set(_dep_list
|
||||
dep_OpenCSG
|
||||
dep_CGAL
|
||||
dep_Qhull
|
||||
dep_OCCT
|
||||
${PNG_PKG}
|
||||
${ZLIB_PKG}
|
||||
${EXPAT_PKG}
|
||||
|
4
deps/GLEW/GLEW.cmake
vendored
@ -4,8 +4,8 @@ find_package(OpenGL QUIET REQUIRED)
|
||||
|
||||
prusaslicer_add_cmake_project(
|
||||
GLEW
|
||||
URL https://sourceforge.net/projects/glew/files/glew/2.1.0/glew-2.1.0.zip
|
||||
URL_HASH SHA256=2700383d4de2455f06114fbaf872684f15529d4bdc5cdea69b5fb0e9aa7763f1
|
||||
URL https://sourceforge.net/projects/glew/files/glew/2.2.0/glew-2.2.0.zip
|
||||
URL_HASH SHA256=a9046a913774395a095edcc0b0ac2d81c3aacca61787b39839b941e9be14e0d4
|
||||
SOURCE_SUBDIR build/cmake
|
||||
CMAKE_ARGS
|
||||
-DBUILD_UTILS=OFF
|
||||
|
4
deps/NanoSVG/NanoSVG.cmake
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
prusaslicer_add_cmake_project(NanoSVG
|
||||
URL https://github.com/memononen/nanosvg/archive/4c8f0139b62c6e7faa3b67ce1fbe6e63590ed148.zip
|
||||
URL_HASH SHA256=584e084af1a75bf633f79753ce2f6f6ec8686002ca27f35f1037c25675fecfb6
|
||||
)
|
22
deps/OCCT/OCCT.cmake
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
prusaslicer_add_cmake_project(OCCT
|
||||
#LMBBS: changed version to 7.6.2
|
||||
URL https://github.com/Open-Cascade-SAS/OCCT/archive/refs/tags/V7_6_2.zip
|
||||
URL_HASH SHA256=c696b923593e8c18d059709717dbf155b3e72fdd283c8522047a790ec3a432c5
|
||||
|
||||
CMAKE_ARGS
|
||||
-DINSTALL_DIR_LAYOUT=Unix # LMBBS
|
||||
-DBUILD_LIBRARY_TYPE=Static
|
||||
-DUSE_TK=OFF
|
||||
-DUSE_TBB=OFF
|
||||
-DUSE_FREETYPE=OFF
|
||||
-DUSE_FFMPEG=OFF
|
||||
-DUSE_VTK=OFF
|
||||
-DUSE_FREETYPE=OFF
|
||||
-DBUILD_MODULE_ApplicationFramework=OFF
|
||||
#-DBUILD_MODULE_DataExchange=OFF
|
||||
-DBUILD_MODULE_Draw=OFF
|
||||
-DBUILD_MODULE_FoundationClasses=OFF
|
||||
-DBUILD_MODULE_ModelingAlgorithms=OFF
|
||||
-DBUILD_MODULE_ModelingData=OFF
|
||||
-DBUILD_MODULE_Visualization=OFF
|
||||
)
|
15
deps/wxWidgets/wxWidgets.cmake
vendored
@ -1,5 +1,3 @@
|
||||
set(_wx_git_tag v3.1.4-patched)
|
||||
|
||||
set(_wx_toolkit "")
|
||||
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||
set(_gtk_ver 2)
|
||||
@ -15,11 +13,9 @@ if (UNIX AND NOT APPLE) # wxWidgets will not use char as the underlying type for
|
||||
endif()
|
||||
|
||||
prusaslicer_add_cmake_project(wxWidgets
|
||||
# GIT_REPOSITORY "https://github.com/prusa3d/wxWidgets"
|
||||
# GIT_TAG tm_cross_compile #${_wx_git_tag}
|
||||
URL https://github.com/prusa3d/wxWidgets/archive/489f6118256853cf5b299d595868641938566cdb.zip
|
||||
URL_HASH SHA256=5b22d465377cedd8044bba69bea958b248953fd3628c1de4913a84d4e6f6175b
|
||||
DEPENDS ${PNG_PKG} ${ZLIB_PKG} ${EXPAT_PKG} dep_TIFF dep_JPEG
|
||||
URL https://github.com/prusa3d/wxWidgets/archive/2a0b365df947138c513a888d707d46248d78a341.zip
|
||||
URL_HASH SHA256=9ab05cd5179196fad4ae702c78eaae9418e73a402cfd390f7438e469b13eb735
|
||||
DEPENDS ${PNG_PKG} ${ZLIB_PKG} ${EXPAT_PKG} dep_TIFF dep_JPEG dep_NanoSVG
|
||||
CMAKE_ARGS
|
||||
-DwxBUILD_PRECOMP=ON
|
||||
${_wx_toolkit}
|
||||
@ -32,13 +28,16 @@ prusaslicer_add_cmake_project(wxWidgets
|
||||
-DwxUSE_OPENGL=ON
|
||||
-DwxUSE_LIBPNG=sys
|
||||
-DwxUSE_ZLIB=sys
|
||||
-DwxUSE_REGEX=builtin
|
||||
-DwxUSE_NANOSVG=sys
|
||||
-DwxUSE_NANOSVG_EXTERNAL=ON
|
||||
-DwxUSE_REGEX=OFF
|
||||
-DwxUSE_LIBXPM=builtin
|
||||
-DwxUSE_LIBJPEG=sys
|
||||
-DwxUSE_LIBTIFF=sys
|
||||
-DwxUSE_EXPAT=sys
|
||||
-DwxUSE_LIBSDL=OFF
|
||||
-DwxUSE_XTEST=OFF
|
||||
-DwxUSE_GLCANVAS_EGL=OFF
|
||||
)
|
||||
|
||||
if (MSVC)
|
||||
|
@ -1,4 +1,5 @@
|
||||
min_slic3r_version = 2.5.0-alpha0
|
||||
0.2.0 Added alternative nozzle support
|
||||
0.1.5 Added Ender-3 S1 Pro
|
||||
min_slic3r_version = 2.4.1
|
||||
0.1.4 Added Ender-3 Pro. Added M25 support for some printers.
|
||||
|
BIN
resources/profiles/Creality/CR10SMARTPRO_thumbnail.png
Normal file
After Width: | Height: | Size: 35 KiB |
BIN
resources/profiles/Creality/ENDER3MAXNEO_thumbnail.png
Normal file
After Width: | Height: | Size: 41 KiB |
BIN
resources/profiles/Creality/ENDER3V2NEO_thumbnail.png
Normal file
After Width: | Height: | Size: 34 KiB |
2
resources/profiles/Elegoo.idx
Normal file
@ -0,0 +1,2 @@
|
||||
min_slic3r_version = 2.5.0-alpha3
|
||||
1.0.0 Initial version
|
510
resources/profiles/Elegoo.ini
Normal file
@ -0,0 +1,510 @@
|
||||
# PrusaSlicer print profiles for the Elegoo printers.
|
||||
# By Andrew Suzuki (andrewsuzuki.com), adapted from Creality.ini
|
||||
|
||||
[vendor]
|
||||
# Vendor name will be shown by the Config Wizard.
|
||||
name = Elegoo
|
||||
# Configuration version of this file. Config file will only be installed, if the config_version differs.
|
||||
# This means, the server may force the PrusaSlicer configuration to be downgraded.
|
||||
config_version = 1.0.0
|
||||
config_update_url = https://files.prusa3d.com/wp-content/uploads/repository/PrusaSlicer-settings-master/live/Elegoo/
|
||||
|
||||
# The printer models will be shown by the Configuration Wizard in this order,
|
||||
# also the first model installed & the first nozzle installed will be activated after install.
|
||||
# Printer model name will be shown by the installation wizard.
|
||||
|
||||
[printer_model:NEPTUNE1]
|
||||
name = Elegoo Neptune-1
|
||||
variants = 0.4
|
||||
technology = FFF
|
||||
family = NEPTUNE
|
||||
bed_model =
|
||||
bed_texture =
|
||||
default_materials = Generic PLA @ELEGOO; Generic PETG @ELEGOO; Generic ABS @ELEGOO
|
||||
|
||||
[printer_model:NEPTUNE2]
|
||||
name = Elegoo Neptune-2
|
||||
variants = 0.4
|
||||
technology = FFF
|
||||
family = NEPTUNE
|
||||
bed_model =
|
||||
bed_texture =
|
||||
default_materials = Generic PLA @ELEGOO; Generic PETG @ELEGOO; Generic ABS @ELEGOO
|
||||
|
||||
[printer_model:NEPTUNE2D]
|
||||
name = Elegoo Neptune-2D
|
||||
variants = 0.4
|
||||
technology = FFF
|
||||
family = NEPTUNE
|
||||
bed_model =
|
||||
bed_texture =
|
||||
default_materials = Generic PLA @ELEGOO; Generic PETG @ELEGOO; Generic ABS @ELEGOO
|
||||
|
||||
[printer_model:NEPTUNE2S]
|
||||
name = Elegoo Neptune-2S
|
||||
variants = 0.4
|
||||
technology = FFF
|
||||
family = NEPTUNE
|
||||
bed_model =
|
||||
bed_texture =
|
||||
default_materials = Generic PLA @ELEGOO; Generic PETG @ELEGOO; Generic ABS @ELEGOO
|
||||
|
||||
[printer_model:NEPTUNE3]
|
||||
name = Elegoo Neptune-3
|
||||
variants = 0.4
|
||||
technology = FFF
|
||||
family = NEPTUNE
|
||||
bed_model =
|
||||
bed_texture =
|
||||
default_materials = Generic PLA @ELEGOO; Generic PETG @ELEGOO; Generic ABS @ELEGOO
|
||||
|
||||
[printer_model:NEPTUNEX]
|
||||
name = Elegoo Neptune-X
|
||||
variants = 0.4
|
||||
technology = FFF
|
||||
family = NEPTUNE
|
||||
bed_model =
|
||||
bed_texture =
|
||||
default_materials = Generic PLA @ELEGOO; Generic PETG @ELEGOO; Generic ABS @ELEGOO
|
||||
|
||||
# All presets starting with asterisk, for example *common*, are intermediate and they will
|
||||
# not make it into the user interface.
|
||||
|
||||
# Common print preset
|
||||
[print:*common*]
|
||||
avoid_crossing_perimeters = 0
|
||||
bridge_angle = 0
|
||||
bridge_flow_ratio = 0.95
|
||||
bridge_speed = 25
|
||||
brim_width = 0
|
||||
clip_multipart_objects = 1
|
||||
compatible_printers =
|
||||
complete_objects = 0
|
||||
dont_support_bridges = 1
|
||||
elefant_foot_compensation = 0.1
|
||||
ensure_vertical_shell_thickness = 1
|
||||
external_fill_pattern = rectilinear
|
||||
external_perimeters_first = 0
|
||||
external_perimeter_extrusion_width = 0.45
|
||||
external_perimeter_speed = 25
|
||||
extra_perimeters = 0
|
||||
extruder_clearance_height = 25
|
||||
extruder_clearance_radius = 45
|
||||
extrusion_width = 0.45
|
||||
fill_angle = 45
|
||||
fill_density = 20%
|
||||
fill_pattern = grid
|
||||
first_layer_extrusion_width = 0.42
|
||||
first_layer_height = 0.2
|
||||
first_layer_speed = 20
|
||||
gap_fill_speed = 30
|
||||
gcode_comments = 0
|
||||
infill_every_layers = 1
|
||||
infill_extruder = 1
|
||||
infill_extrusion_width = 0.45
|
||||
infill_first = 0
|
||||
infill_only_where_needed = 0
|
||||
infill_overlap = 25%
|
||||
infill_speed = 50
|
||||
interface_shells = 0
|
||||
max_print_speed = 100
|
||||
max_volumetric_extrusion_rate_slope_negative = 0
|
||||
max_volumetric_extrusion_rate_slope_positive = 0
|
||||
max_volumetric_speed = 0
|
||||
min_skirt_length = 4
|
||||
notes =
|
||||
overhangs = 0
|
||||
only_retract_when_crossing_perimeters = 0
|
||||
ooze_prevention = 0
|
||||
output_filename_format = {input_filename_base}_{layer_height}mm_{filament_type[0]}_{printer_model}_{print_time}.gcode
|
||||
perimeters = 2
|
||||
perimeter_extruder = 1
|
||||
perimeter_extrusion_width = 0.45
|
||||
perimeter_speed = 40
|
||||
post_process =
|
||||
print_settings_id =
|
||||
raft_layers = 0
|
||||
resolution = 0
|
||||
seam_position = nearest
|
||||
single_extruder_multi_material_priming = 0
|
||||
skirts = 1
|
||||
skirt_distance = 2
|
||||
skirt_height = 2
|
||||
small_perimeter_speed = 25
|
||||
solid_infill_below_area = 0
|
||||
solid_infill_every_layers = 0
|
||||
solid_infill_extruder = 1
|
||||
solid_infill_extrusion_width = 0.45
|
||||
solid_infill_speed = 40
|
||||
spiral_vase = 0
|
||||
standby_temperature_delta = -5
|
||||
support_material = 0
|
||||
support_material_extruder = 0
|
||||
support_material_extrusion_width = 0.38
|
||||
support_material_interface_extruder = 0
|
||||
support_material_angle = 0
|
||||
support_material_buildplate_only = 0
|
||||
support_material_enforce_layers = 0
|
||||
support_material_contact_distance = 0.15
|
||||
support_material_interface_contact_loops = 0
|
||||
support_material_interface_layers = 2
|
||||
support_material_interface_spacing = 0.2
|
||||
support_material_interface_speed = 100%
|
||||
support_material_pattern = rectilinear
|
||||
support_material_spacing = 2
|
||||
support_material_speed = 40
|
||||
support_material_synchronize_layers = 0
|
||||
support_material_threshold = 45
|
||||
support_material_with_sheath = 0
|
||||
support_material_xy_spacing = 60%
|
||||
thin_walls = 0
|
||||
top_infill_extrusion_width = 0.4
|
||||
top_solid_infill_speed = 30
|
||||
travel_speed = 150
|
||||
wipe_tower = 0
|
||||
wipe_tower_bridging = 10
|
||||
wipe_tower_rotation_angle = 0
|
||||
wipe_tower_width = 60
|
||||
wipe_tower_x = 170
|
||||
wipe_tower_y = 140
|
||||
xy_size_compensation = 0
|
||||
|
||||
[print:*0.08mm*]
|
||||
inherits = *common*
|
||||
layer_height = 0.08
|
||||
perimeters = 3
|
||||
bottom_solid_layers = 9
|
||||
top_solid_layers = 11
|
||||
|
||||
[print:*0.10mm*]
|
||||
inherits = *common*
|
||||
layer_height = 0.1
|
||||
perimeters = 3
|
||||
bottom_solid_layers = 7
|
||||
top_solid_layers = 9
|
||||
|
||||
[print:*0.12mm*]
|
||||
inherits = *common*
|
||||
layer_height = 0.12
|
||||
perimeters = 3
|
||||
bottom_solid_layers = 6
|
||||
top_solid_layers = 7
|
||||
|
||||
[print:*0.16mm*]
|
||||
inherits = *common*
|
||||
layer_height = 0.16
|
||||
bottom_solid_layers = 5
|
||||
top_solid_layers = 7
|
||||
|
||||
[print:*0.20mm*]
|
||||
inherits = *common*
|
||||
layer_height = 0.20
|
||||
bottom_solid_layers = 4
|
||||
top_solid_layers = 5
|
||||
|
||||
[print:*0.24mm*]
|
||||
inherits = *common*
|
||||
layer_height = 0.24
|
||||
top_infill_extrusion_width = 0.45
|
||||
bottom_solid_layers = 3
|
||||
top_solid_layers = 4
|
||||
|
||||
[print:*0.28mm*]
|
||||
inherits = *common*
|
||||
layer_height = 0.28
|
||||
top_infill_extrusion_width = 0.45
|
||||
bottom_solid_layers = 3
|
||||
top_solid_layers = 4
|
||||
|
||||
[print:0.08mm SUPERDETAIL @ELEGOO]
|
||||
inherits = *0.08mm*
|
||||
compatible_printers_condition = printer_model=~/(NEPTUNE).*/ and nozzle_diameter[0]==0.4
|
||||
|
||||
[print:0.10mm HIGHDETAIL @ELEGOO]
|
||||
inherits = *0.10mm*
|
||||
compatible_printers_condition = printer_model=~/(NEPTUNE).*/ and nozzle_diameter[0]==0.4
|
||||
|
||||
[print:0.12mm DETAIL @ELEGOO]
|
||||
inherits = *0.12mm*
|
||||
compatible_printers_condition = printer_model=~/(NEPTUNE).*/ and nozzle_diameter[0]==0.4
|
||||
|
||||
[print:0.16mm OPTIMAL @ELEGOO]
|
||||
inherits = *0.16mm*
|
||||
compatible_printers_condition = printer_model=~/(NEPTUNE).*/ and nozzle_diameter[0]==0.4
|
||||
|
||||
[print:0.20mm NORMAL @ELEGOO]
|
||||
inherits = *0.20mm*
|
||||
compatible_printers_condition = printer_model=~/(NEPTUNE).*/ and nozzle_diameter[0]==0.4
|
||||
|
||||
[print:0.24mm DRAFT @ELEGOO]
|
||||
inherits = *0.24mm*
|
||||
compatible_printers_condition = printer_model=~/(NEPTUNE).*/ and nozzle_diameter[0]==0.4
|
||||
|
||||
[print:0.28mm SUPERDRAFT @ELEGOO]
|
||||
inherits = *0.28mm*
|
||||
compatible_printers_condition = printer_model=~/(NEPTUNE).*/ and nozzle_diameter[0]==0.4
|
||||
|
||||
# When submitting new filaments please print the following temperature tower at 0.1mm layer height:
|
||||
# https://www.thingiverse.com/thing:2615842
|
||||
# Pay particular attention to bridging, overhangs and retractions.
|
||||
# Also print the following bed adhesion test at 0.1 layer height as well:
|
||||
# https://www.prusaprinters.org/prints/4634-bed-adhesion-warp-test
|
||||
# At least for PLA, please keep bed temp at 60, as many Elegoo printers do not have any ABL
|
||||
# So having some leeway to get good bed adhesion is not a luxury for many users
|
||||
|
||||
[filament:*common*]
|
||||
cooling = 0
|
||||
compatible_printers =
|
||||
extrusion_multiplier = 1
|
||||
filament_cost = 0
|
||||
filament_density = 0
|
||||
filament_diameter = 1.75
|
||||
filament_notes = ""
|
||||
filament_settings_id = ""
|
||||
filament_soluble = 0
|
||||
min_print_speed = 15
|
||||
slowdown_below_layer_time = 20
|
||||
compatible_printers_condition = printer_notes=~/.*PRINTER_VENDOR_ELEGOO.*/
|
||||
|
||||
[filament:*PLA*]
|
||||
inherits = *common*
|
||||
bed_temperature = 60
|
||||
fan_below_layer_time = 100
|
||||
filament_colour = #DDDDDD
|
||||
filament_max_volumetric_speed = 15
|
||||
filament_type = PLA
|
||||
filament_density = 1.24
|
||||
filament_cost = 20
|
||||
first_layer_bed_temperature = 60
|
||||
first_layer_temperature = 210
|
||||
fan_always_on = 1
|
||||
cooling = 1
|
||||
max_fan_speed = 100
|
||||
min_fan_speed = 100
|
||||
bridge_fan_speed = 100
|
||||
disable_fan_first_layers = 1
|
||||
temperature = 205
|
||||
|
||||
[filament:*PET*]
|
||||
inherits = *common*
|
||||
bed_temperature = 70
|
||||
cooling = 1
|
||||
disable_fan_first_layers = 3
|
||||
fan_below_layer_time = 20
|
||||
filament_colour = #DDDDDD
|
||||
filament_max_volumetric_speed = 8
|
||||
filament_type = PETG
|
||||
filament_density = 1.27
|
||||
filament_cost = 20
|
||||
first_layer_bed_temperature = 70
|
||||
first_layer_temperature = 240
|
||||
fan_always_on = 1
|
||||
max_fan_speed = 50
|
||||
min_fan_speed = 20
|
||||
bridge_fan_speed = 100
|
||||
temperature = 240
|
||||
|
||||
[filament:*ABS*]
|
||||
inherits = *common*
|
||||
bed_temperature = 100
|
||||
cooling = 0
|
||||
disable_fan_first_layers = 3
|
||||
fan_below_layer_time = 20
|
||||
filament_colour = #DDDDDD
|
||||
filament_max_volumetric_speed = 11
|
||||
filament_type = ABS
|
||||
filament_density = 1.04
|
||||
filament_cost = 20
|
||||
first_layer_bed_temperature = 100
|
||||
first_layer_temperature = 245
|
||||
fan_always_on = 0
|
||||
max_fan_speed = 0
|
||||
min_fan_speed = 0
|
||||
bridge_fan_speed = 30
|
||||
top_fan_speed = 0
|
||||
temperature = 245
|
||||
|
||||
[filament:Generic PLA @ELEGOO]
|
||||
inherits = *PLA*
|
||||
filament_vendor = Generic
|
||||
|
||||
[filament:Generic PETG @ELEGOO]
|
||||
inherits = *PET*
|
||||
filament_vendor = Generic
|
||||
|
||||
[filament:Generic ABS @ELEGOO]
|
||||
inherits = *ABS*
|
||||
first_layer_bed_temperature = 90
|
||||
bed_temperature = 90
|
||||
filament_vendor = Generic
|
||||
|
||||
# Common printer preset
|
||||
[printer:*common*]
|
||||
printer_technology = FFF
|
||||
before_layer_gcode = ;BEFORE_LAYER_CHANGE\nG92 E0\n;[layer_z]\n\n
|
||||
bed_shape = 0x0,235x0,235x235,0x235
|
||||
between_objects_gcode =
|
||||
pause_print_gcode =
|
||||
deretract_speed = 0
|
||||
extruder_colour = #FCE94F
|
||||
extruder_offset = 0x0
|
||||
gcode_flavor = marlin
|
||||
silent_mode = 0
|
||||
remaining_times = 0
|
||||
machine_max_acceleration_e = 5000
|
||||
machine_max_acceleration_extruding = 500
|
||||
machine_max_acceleration_retracting = 1000
|
||||
machine_max_acceleration_x = 500
|
||||
machine_max_acceleration_y = 500
|
||||
machine_max_acceleration_z = 100
|
||||
machine_max_feedrate_e = 60
|
||||
machine_max_feedrate_x = 500
|
||||
machine_max_feedrate_y = 500
|
||||
machine_max_feedrate_z = 10
|
||||
machine_max_jerk_e = 5
|
||||
machine_max_jerk_x = 8
|
||||
machine_max_jerk_y = 8
|
||||
machine_max_jerk_z = 0.4
|
||||
machine_min_extruding_rate = 0
|
||||
machine_min_travel_rate = 0
|
||||
layer_gcode = ;AFTER_LAYER_CHANGE\n;[layer_z]
|
||||
max_layer_height = 0.3
|
||||
min_layer_height = 0.07
|
||||
max_print_height = 250
|
||||
nozzle_diameter = 0.4
|
||||
printer_notes =
|
||||
printer_settings_id =
|
||||
retract_before_travel = 1
|
||||
retract_before_wipe = 0%
|
||||
retract_layer_change = 1
|
||||
retract_length = 1
|
||||
retract_length_toolchange = 1
|
||||
retract_lift = 0
|
||||
retract_lift_above = 0
|
||||
retract_lift_below = 0
|
||||
retract_restart_extra = 0
|
||||
retract_restart_extra_toolchange = 0
|
||||
retract_speed = 35
|
||||
single_extruder_multi_material = 0
|
||||
thumbnails = 16x16,220x124
|
||||
toolchange_gcode =
|
||||
use_firmware_retraction = 0
|
||||
use_relative_e_distances = 1
|
||||
use_volumetric_e = 0
|
||||
variable_layer_height = 1
|
||||
wipe = 1
|
||||
z_offset = 0
|
||||
printer_model =
|
||||
default_print_profile = 0.16mm OPTIMAL @ELEGOO
|
||||
default_filament_profile = Generic PLA @ELEGOO
|
||||
|
||||
[printer:Elegoo Neptune-2]
|
||||
inherits = *common*
|
||||
printer_model = NEPTUNE2
|
||||
printer_variant = 0.4
|
||||
max_layer_height = 0.28
|
||||
min_layer_height = 0.08
|
||||
printer_notes = Do not remove the following keywords! These keywords are used in the "compatible printer" condition of the print and filament profiles to link the particular print and filament profiles to this printer profile.\nPRINTER_VENDOR_ELEGOO\nPRINTER_MODEL_NEPTUNE2\nPRINTER_HAS_BOWDEN
|
||||
max_print_height = 250
|
||||
machine_max_acceleration_e = 5000
|
||||
machine_max_acceleration_extruding = 500
|
||||
machine_max_acceleration_retracting = 1000
|
||||
machine_max_acceleration_x = 500
|
||||
machine_max_acceleration_y = 500
|
||||
machine_max_acceleration_z = 100
|
||||
machine_max_feedrate_e = 60
|
||||
machine_max_feedrate_x = 500
|
||||
machine_max_feedrate_y = 500
|
||||
machine_max_feedrate_z = 10
|
||||
machine_max_jerk_e = 5
|
||||
machine_max_jerk_x = 8
|
||||
machine_max_jerk_y = 8
|
||||
machine_max_jerk_z = 0.4
|
||||
machine_min_extruding_rate = 0
|
||||
machine_min_travel_rate = 0
|
||||
nozzle_diameter = 0.4
|
||||
retract_before_travel = 2
|
||||
retract_length = 5
|
||||
retract_speed = 60
|
||||
deretract_speed = 40
|
||||
retract_before_wipe = 70%
|
||||
start_gcode = G90 ; use absolute coordinates\nM83 ; extruder relative mode\nM104 S120 ; set temporary nozzle temp to prevent oozing during homing and auto bed leveling\nM140 S[first_layer_bed_temperature] ; set final bed temp\nG4 S10 ; allow partial nozzle warmup\nG28 ; home all axis\nG1 Z50 F240\nG1 X2 Y10 F3000\nM104 S[first_layer_temperature] ; set final nozzle temp\nM190 S[first_layer_bed_temperature] ; wait for bed temp to stabilize\nM109 S[first_layer_temperature] ; wait for nozzle temp to stabilize\nG1 Z0.28 F240\nG92 E0\nG1 Y140 E10 F1500 ; prime the nozzle\nG1 X2.3 F5000\nG92 E0\nG1 Y10 E10 F1200 ; prime the nozzle\nG92 E0
|
||||
end_gcode = {if max_layer_z < max_print_height}G1 Z{z_offset+min(max_layer_z+2, max_print_height)} F600 ; Move print head up{endif}\nG1 X5 Y{print_bed_max[1]*0.8} F{travel_speed*60} ; present print\n{if max_layer_z < max_print_height-10}G1 Z{z_offset+min(max_layer_z+70, max_print_height-10)} F600 ; Move print head further up{endif}\n{if max_layer_z < max_print_height*0.6}G1 Z{max_print_height*0.6} F600 ; Move print head further up{endif}\nM140 S0 ; turn off heatbed\nM104 S0 ; turn off temperature\nM107 ; turn off fan\nM84 X Y E ; disable motors
|
||||
|
||||
|
||||
# Intended for printers with a smaller bed
|
||||
# [printer:*fastabl*]
|
||||
# start_gcode = G90 ; use absolute coordinates\nM83 ; extruder relative mode\nM104 S120 ; set temporary nozzle temp to prevent oozing during homing and auto bed leveling\nM140 S[first_layer_bed_temperature] ; set final bed temp\nG4 S10 ; allow partial nozzle warmup\nG28 ; home all axis\nG29 ; auto bed levelling\nG1 Z50 F240\nG1 X2 Y10 F3000\nM104 S[first_layer_temperature] ; set final nozzle temp\nM190 S[first_layer_bed_temperature] ; wait for bed temp to stabilize\nM109 S[first_layer_temperature] ; wait for nozzle temp to stabilize\nG1 Z0.28 F240\nG92 E0\nG1 Y140 E10 F1500 ; prime the nozzle\nG1 X2.3 F5000\nG92 E0\nG1 Y10 E10 F1200 ; prime the nozzle\nG92 E0
|
||||
|
||||
# Intended for printers with a larger bed
|
||||
# [printer:*slowabl*]
|
||||
# start_gcode = G90 ; use absolute coordinates\nM83 ; extruder relative mode\nM104 S120 ; set temporary nozzle temp to prevent oozing during homing and auto bed leveling\nM140 S[first_layer_bed_temperature] ; set final bed temp\nM190 S[first_layer_bed_temperature] ; wait for bed temp to stabilize\nG28 ; home all axis\nG29 ; auto bed levelling\nG1 Z50 F240\nG1 X2 Y10 F3000\nM104 S[first_layer_temperature] ; set final nozzle temp\nM109 S[first_layer_temperature] ; wait for nozzle temp to stabilize\nG1 Z0.28 F240\nG92 E0\nG1 Y140 E10 F1500 ; prime the nozzle\nG1 X2.3 F5000\nG92 E0\nG1 Y10 E10 F1200 ; prime the nozzle\nG92 E0
|
||||
|
||||
# Intended for printers with vendor official firmware verified to support M25
|
||||
# [printer:*pauseprint*]
|
||||
# pause_print_gcode = M25 ; pause print
|
||||
|
||||
# Intended for printers where the Z-axis lowers the print bed during printing
|
||||
# [printer:*invertedz*]
|
||||
# end_gcode = {if max_layer_z < max_print_height}G1 Z{z_offset+min(max_layer_z+2, max_print_height)} F600{endif} ; Move print bed down\nG1 X50 Y50 F{travel_speed*60} ; present print\n{if max_layer_z < max_print_height-10}G1 Z{z_offset+max_print_height-10} F600{endif} ; Move print bed down further down\nM140 S0 ; turn off heatbed\nM104 S0 ; turn off temperature\nM107 ; turn off fan\nM84 X Y E ; disable motors
|
||||
|
||||
# Intended for printers with dual extruders and a single hotend/nozzle
|
||||
[printer:*dualextruder*]
|
||||
single_extruder_multi_material = 1
|
||||
cooling_tube_length = 23
|
||||
cooling_tube_retraction = 35
|
||||
extra_loading_move = -2
|
||||
parking_pos_retraction = 80
|
||||
deretract_speed = 40,40
|
||||
extruder_colour = #0080C0;#FFFF9F
|
||||
extruder_offset = 0x0,0x0
|
||||
max_layer_height = 0.28,0.28
|
||||
min_layer_height = 0.08,0.08
|
||||
nozzle_diameter = 0.4,0.4
|
||||
retract_before_travel = 2,2
|
||||
retract_before_wipe = 70%,70%
|
||||
retract_layer_change = 1,1
|
||||
retract_length = 5,5
|
||||
retract_length_toolchange = 1,1
|
||||
retract_lift = 0,0
|
||||
retract_lift_above = 0,0
|
||||
retract_lift_below = 0,0
|
||||
retract_restart_extra = 0,0
|
||||
retract_restart_extra_toolchange = 0,0
|
||||
retract_speed = 60,60
|
||||
wipe = 1,1
|
||||
start_gcode = T[initial_tool] ; set active extruder\nG90 ; use absolute coordinates\nM83 ; extruder relative mode\nM140 S{first_layer_bed_temperature[0]} ; set final bed temp\nM104 S150 ; set temporary nozzle temp to prevent oozing during homing and auto bed leveling\nG4 S10 ; allow partial nozzle warmup\nG28 ; home all axis\n;G29 ; auto bed levelling - remove ; at beginning of line to enable\n;M420 S1 ; enable mesh - remove ; at beginning of line to enable\nG1 Z50 F240\nG1 X2 Y10 F3000\nM104 S{first_layer_temperature[0]} ; set final nozzle temp\nM190 S{first_layer_bed_temperature[0]} ; wait for bed temp to stabilize\nM109 S{first_layer_temperature[0]} ; wait for nozzle temp to stabilize\nG1 Z0.28 F240 ; move down to prime nozzle\nG92 E0 ; reset extruder\nG1 E90 ; load filament\nG92 E0 ; reset extruder\nG1 Y140 E10 F1500 ; prime the nozzle\nG1 X2.3 F5000 ; move over for second prime line\nG92 E0 ; reset extruder\nG1 Y10 E10 F1200 ; prime the nozzle\nG92 E0 ; reset extruder
|
||||
end_gcode = {if max_layer_z < max_print_height}G1 Z{z_offset+min(max_layer_z+2, max_print_height)} F600 ; Move print head up{endif}\nG1 X5 Y{print_bed_max[1]*0.8} F{travel_speed*60} ; present print\nG1 E-80 F2000 ; unload filament\n{if max_layer_z < max_print_height-10}G1 Z{z_offset+min(max_layer_z+70, max_print_height-10)} F600 ; Move print head further up{endif}\n{if max_layer_z < max_print_height*0.6}G1 Z{max_print_height*0.6} F600 ; Move print head further up{endif}\nM140 S0 ; turn off heatbed\nM104 S0 ; turn off temperature\nM107 ; turn off fan\nM84 X Y E ; disable motors
|
||||
|
||||
# Copy of Creality CR-X config for the Neptune 2D (dual extruder, single hotend)
|
||||
|
||||
[printer:Elegoo Neptune-2D]
|
||||
inherits = Elegoo Neptune-2; *dualextruder*
|
||||
retract_length = 6,6
|
||||
printer_model = NEPTUNE2D
|
||||
printer_notes = Do not remove the following keywords! These keywords are used in the "compatible printer" condition of the print and filament profiles to link the particular print and filament profiles to this printer profile.\nPRINTER_VENDOR_ELEGOO\nPRINTER_MODEL_NEPTUNE2D\nPRINTER_HAS_BOWDEN
|
||||
|
||||
[printer:Elegoo Neptune-2S]
|
||||
inherits = Elegoo Neptune-2
|
||||
printer_model = NEPTUNE2S
|
||||
printer_notes = Do not remove the following keywords! These keywords are used in the "compatible printer" condition of the print and filament profiles to link the particular print and filament profiles to this printer profile.\nPRINTER_VENDOR_ELEGOO\nPRINTER_MODEL_NEPTUNE2D\nPRINTER_HAS_BOWDEN
|
||||
|
||||
[printer:Elegoo Neptune-X]
|
||||
inherits = Elegoo Neptune-2
|
||||
max_print_height = 300
|
||||
printer_model = NEPTUNEX
|
||||
printer_notes = Do not remove the following keywords! These keywords are used in the "compatible printer" condition of the print and filament profiles to link the particular print and filament profiles to this printer profile.\nPRINTER_VENDOR_ELEGOO\nPRINTER_MODEL_NEPTUNE2D\nPRINTER_HAS_BOWDEN
|
||||
|
||||
[printer:Elegoo Neptune-3]
|
||||
inherits = Elegoo Neptune-2
|
||||
max_print_height = 280
|
||||
start_gcode = G90 ; use absolute coordinates\nM83 ; extruder relative mode\nM104 S120 ; set temporary nozzle temp to prevent oozing during homing and auto bed leveling\nM140 S[first_layer_bed_temperature] ; set final bed temp\nG4 S10 ; allow partial nozzle warmup\nG28 ; home all axis\nG29 ; run abl mesh\nM420 S1 ; load mesh\nG1 Z50 F240\nG1 X2 Y10 F3000\nM104 S[first_layer_temperature] ; set final nozzle temp\nM190 S[first_layer_bed_temperature] ; wait for bed temp to stabilize\nM109 S[first_layer_temperature] ; wait for nozzle temp to stabilize\nG1 Z0.28 F240\nG92 E0\nG1 Y140 E10 F1500 ; prime the nozzle\nG1 X2.3 F5000\nG92 E0\nG1 Y10 E10 F1200 ; prime the nozzle\nG92 E0
|
||||
printer_model = NEPTUNE3
|
||||
printer_notes = Do not remove the following keywords! These keywords are used in the "compatible printer" condition of the print and filament profiles to link the particular print and filament profiles to this printer profile.\nPRINTER_VENDOR_ELEGOO\nPRINTER_MODEL_NEPTUNE2D\nPRINTER_HAS_BOWDEN
|
||||
|
||||
[printer:Elegoo Neptune-1]
|
||||
inherits = Elegoo Neptune-2
|
||||
bed_shape = 0x0,210x0,210x210,0x210
|
||||
max_print_height = 200
|
||||
printer_model = NEPTUNE1
|
||||
printer_notes = Do not remove the following keywords! These keywords are used in the "compatible printer" condition of the print and filament profiles to link the particular print and filament profiles to this printer profile.\nPRINTER_VENDOR_ELEGOO\nPRINTER_MODEL_NEPTUNE2D\nPRINTER_HAS_BOWDEN
|
BIN
resources/profiles/Elegoo/NEPTUNE1_thumbnail.png
Normal file
After Width: | Height: | Size: 37 KiB |
BIN
resources/profiles/Elegoo/NEPTUNE2D_thumbnail.png
Normal file
After Width: | Height: | Size: 45 KiB |
BIN
resources/profiles/Elegoo/NEPTUNE2S_thumbnail.png
Normal file
After Width: | Height: | Size: 50 KiB |
BIN
resources/profiles/Elegoo/NEPTUNE2_thumbnail.png
Normal file
After Width: | Height: | Size: 42 KiB |
BIN
resources/profiles/Elegoo/NEPTUNE3_thumbnail.png
Normal file
After Width: | Height: | Size: 44 KiB |
BIN
resources/profiles/Elegoo/NEPTUNEX_thumbnail.png
Normal file
After Width: | Height: | Size: 37 KiB |
Before Width: | Height: | Size: 13 KiB After Width: | Height: | Size: 12 KiB |
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 11 KiB |
Before Width: | Height: | Size: 11 KiB After Width: | Height: | Size: 10 KiB |
@ -14,8 +14,6 @@ add_subdirectory(semver)
|
||||
add_subdirectory(libigl)
|
||||
add_subdirectory(hints)
|
||||
add_subdirectory(qoi)
|
||||
|
||||
# Adding libnest2d project for bin packing...
|
||||
add_subdirectory(libnest2d)
|
||||
|
||||
find_package(Qhull 7.2 REQUIRED)
|
||||
@ -30,6 +28,10 @@ endif()
|
||||
|
||||
add_subdirectory(libslic3r)
|
||||
|
||||
if (SLIC3R_ENABLE_FORMAT_STEP)
|
||||
add_subdirectory(occt_wrapper)
|
||||
endif ()
|
||||
|
||||
if (SLIC3R_GUI)
|
||||
add_subdirectory(imgui)
|
||||
add_subdirectory(hidapi)
|
||||
@ -58,18 +60,25 @@ if (SLIC3R_GUI)
|
||||
"Hint: On Linux you can set -DSLIC3R_WX_STABLE=1 to use wxWidgets 3.0\n")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
include(${wxWidgets_USE_FILE})
|
||||
else ()
|
||||
find_package(wxWidgets 3.1 REQUIRED COMPONENTS html adv gl core base)
|
||||
find_package(wxWidgets 3.1 COMPONENTS html adv gl core base)
|
||||
if (NOT wxWidgets_FOUND)
|
||||
message(STATUS "Trying to find wxWidgets in CONFIG mode...")
|
||||
find_package(wxWidgets 3.2 CONFIG REQUIRED COMPONENTS html adv gl core base)
|
||||
else ()
|
||||
include(${wxWidgets_USE_FILE})
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
if(UNIX)
|
||||
message(STATUS "wx-config path: ${wxWidgets_CONFIG_EXECUTABLE}")
|
||||
endif()
|
||||
|
||||
include(${wxWidgets_USE_FILE})
|
||||
|
||||
find_package(JPEG QUIET)
|
||||
find_package(TIFF QUIET)
|
||||
find_package(NanoSVG REQUIRED)
|
||||
|
||||
string(REGEX MATCH "wxpng" WX_PNG_BUILTIN ${wxWidgets_LIBRARIES})
|
||||
if (PNG_FOUND AND NOT WX_PNG_BUILTIN)
|
||||
@ -105,7 +114,10 @@ if (SLIC3R_GUI)
|
||||
# wrong libs for opengl in the link line and it does not link to it by himself.
|
||||
# libslic3r_gui will link to opengl anyway, so lets override wx
|
||||
list(FILTER wxWidgets_LIBRARIES EXCLUDE REGEX OpenGL)
|
||||
|
||||
|
||||
if (UNIX AND NOT APPLE)
|
||||
list(APPEND wxWidgets_LIBRARIES X11 wayland-client wayland-egl EGL)
|
||||
endif ()
|
||||
# list(REMOVE_ITEM wxWidgets_LIBRARIES oleacc)
|
||||
message(STATUS "wx libs: ${wxWidgets_LIBRARIES}")
|
||||
|
||||
@ -136,6 +148,7 @@ if (NOT WIN32 AND NOT APPLE)
|
||||
endif ()
|
||||
|
||||
target_link_libraries(PrusaSlicer libslic3r libcereal)
|
||||
|
||||
if (APPLE)
|
||||
# add_compile_options(-stdlib=libc++)
|
||||
# add_definitions(-DBOOST_THREAD_DONT_USE_CHRONO -DBOOST_NO_CXX11_RVALUE_REFERENCES -DBOOST_THREAD_USES_MOVE)
|
||||
|
@ -315,460 +315,6 @@ inline NfpResult<RawShape> nfpConvexOnly(const RawShape& sh,
|
||||
return {rsh, top_nfp};
|
||||
}
|
||||
|
||||
template<class RawShape>
|
||||
NfpResult<RawShape> nfpSimpleSimple(const RawShape& cstationary,
|
||||
const RawShape& cother)
|
||||
{
|
||||
|
||||
// Algorithms are from the original algorithm proposed in paper:
|
||||
// https://eprints.soton.ac.uk/36850/1/CORMSIS-05-05.pdf
|
||||
|
||||
// /////////////////////////////////////////////////////////////////////////
|
||||
// Algorithm 1: Obtaining the minkowski sum
|
||||
// /////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// I guess this is not a full minkowski sum of the two input polygons by
|
||||
// definition. This yields a subset that is compatible with the next 2
|
||||
// algorithms.
|
||||
|
||||
using Result = NfpResult<RawShape>;
|
||||
using Vertex = TPoint<RawShape>;
|
||||
using Coord = TCoord<Vertex>;
|
||||
using Edge = _Segment<Vertex>;
|
||||
namespace sl = shapelike;
|
||||
using std::signbit;
|
||||
using std::sort;
|
||||
using std::vector;
|
||||
using std::ref;
|
||||
using std::reference_wrapper;
|
||||
|
||||
// TODO The original algorithms expects the stationary polygon in
|
||||
// counter clockwise and the orbiter in clockwise order.
|
||||
// So for preventing any further complication, I will make the input
|
||||
// the way it should be, than make my way around the orientations.
|
||||
|
||||
// Reverse the stationary contour to counter clockwise
|
||||
auto stcont = sl::contour(cstationary);
|
||||
{
|
||||
std::reverse(sl::begin(stcont), sl::end(stcont));
|
||||
stcont.pop_back();
|
||||
auto it = std::min_element(sl::begin(stcont), sl::end(stcont),
|
||||
[](const Vertex& v1, const Vertex& v2) {
|
||||
return getY(v1) < getY(v2);
|
||||
});
|
||||
std::rotate(sl::begin(stcont), it, sl::end(stcont));
|
||||
sl::addVertex(stcont, sl::front(stcont));
|
||||
}
|
||||
RawShape stationary;
|
||||
sl::contour(stationary) = stcont;
|
||||
|
||||
// Reverse the orbiter contour to counter clockwise
|
||||
auto orbcont = sl::contour(cother);
|
||||
{
|
||||
std::reverse(orbcont.begin(), orbcont.end());
|
||||
|
||||
// Step 1: Make the orbiter reverse oriented
|
||||
|
||||
orbcont.pop_back();
|
||||
auto it = std::min_element(orbcont.begin(), orbcont.end(),
|
||||
[](const Vertex& v1, const Vertex& v2) {
|
||||
return getY(v1) < getY(v2);
|
||||
});
|
||||
|
||||
std::rotate(orbcont.begin(), it, orbcont.end());
|
||||
orbcont.emplace_back(orbcont.front());
|
||||
|
||||
for(auto &v : orbcont) v = -v;
|
||||
|
||||
}
|
||||
|
||||
// Copy the orbiter (contour only), we will have to work on it
|
||||
RawShape orbiter;
|
||||
sl::contour(orbiter) = orbcont;
|
||||
|
||||
// An edge with additional data for marking it
|
||||
struct MarkedEdge {
|
||||
Edge e; Radians turn_angle = 0; bool is_turning_point = false;
|
||||
MarkedEdge() = default;
|
||||
MarkedEdge(const Edge& ed, Radians ta, bool tp):
|
||||
e(ed), turn_angle(ta), is_turning_point(tp) {}
|
||||
|
||||
// debug
|
||||
std::string label;
|
||||
};
|
||||
|
||||
// Container for marked edges
|
||||
using EdgeList = vector<MarkedEdge>;
|
||||
|
||||
EdgeList A, B;
|
||||
|
||||
// This is how an edge list is created from the polygons
|
||||
auto fillEdgeList = [](EdgeList& L, const RawShape& ppoly, int dir) {
|
||||
auto& poly = sl::contour(ppoly);
|
||||
|
||||
L.reserve(sl::contourVertexCount(poly));
|
||||
|
||||
if(dir > 0) {
|
||||
auto it = poly.begin();
|
||||
auto nextit = std::next(it);
|
||||
|
||||
double turn_angle = 0;
|
||||
bool is_turn_point = false;
|
||||
|
||||
while(nextit != poly.end()) {
|
||||
L.emplace_back(Edge(*it, *nextit), turn_angle, is_turn_point);
|
||||
it++; nextit++;
|
||||
}
|
||||
} else {
|
||||
auto it = sl::rbegin(poly);
|
||||
auto nextit = std::next(it);
|
||||
|
||||
double turn_angle = 0;
|
||||
bool is_turn_point = false;
|
||||
|
||||
while(nextit != sl::rend(poly)) {
|
||||
L.emplace_back(Edge(*it, *nextit), turn_angle, is_turn_point);
|
||||
it++; nextit++;
|
||||
}
|
||||
}
|
||||
|
||||
auto getTurnAngle = [](const Edge& e1, const Edge& e2) {
|
||||
auto phi = e1.angleToXaxis();
|
||||
auto phi_prev = e2.angleToXaxis();
|
||||
auto turn_angle = phi-phi_prev;
|
||||
if(turn_angle > Pi) turn_angle -= TwoPi;
|
||||
if(turn_angle < -Pi) turn_angle += TwoPi;
|
||||
return turn_angle;
|
||||
};
|
||||
|
||||
auto eit = L.begin();
|
||||
auto enext = std::next(eit);
|
||||
|
||||
eit->turn_angle = getTurnAngle(L.front().e, L.back().e);
|
||||
|
||||
while(enext != L.end()) {
|
||||
enext->turn_angle = getTurnAngle( enext->e, eit->e);
|
||||
eit->is_turning_point =
|
||||
signbit(enext->turn_angle) != signbit(eit->turn_angle);
|
||||
++eit; ++enext;
|
||||
}
|
||||
|
||||
L.back().is_turning_point = signbit(L.back().turn_angle) !=
|
||||
signbit(L.front().turn_angle);
|
||||
|
||||
};
|
||||
|
||||
// Step 2: Fill the edgelists
|
||||
fillEdgeList(A, stationary, 1);
|
||||
fillEdgeList(B, orbiter, 1);
|
||||
|
||||
int i = 1;
|
||||
for(MarkedEdge& me : A) {
|
||||
std::cout << "a" << i << ":\n\t"
|
||||
<< getX(me.e.first()) << " " << getY(me.e.first()) << "\n\t"
|
||||
<< getX(me.e.second()) << " " << getY(me.e.second()) << "\n\t"
|
||||
<< "Turning point: " << (me.is_turning_point ? "yes" : "no")
|
||||
<< std::endl;
|
||||
|
||||
me.label = "a"; me.label += std::to_string(i);
|
||||
i++;
|
||||
}
|
||||
|
||||
i = 1;
|
||||
for(MarkedEdge& me : B) {
|
||||
std::cout << "b" << i << ":\n\t"
|
||||
<< getX(me.e.first()) << " " << getY(me.e.first()) << "\n\t"
|
||||
<< getX(me.e.second()) << " " << getY(me.e.second()) << "\n\t"
|
||||
<< "Turning point: " << (me.is_turning_point ? "yes" : "no")
|
||||
<< std::endl;
|
||||
me.label = "b"; me.label += std::to_string(i);
|
||||
i++;
|
||||
}
|
||||
|
||||
// A reference to a marked edge that also knows its container
|
||||
struct MarkedEdgeRef {
|
||||
reference_wrapper<MarkedEdge> eref;
|
||||
reference_wrapper<vector<MarkedEdgeRef>> container;
|
||||
Coord dir = 1; // Direction modifier
|
||||
|
||||
inline Radians angleX() const { return eref.get().e.angleToXaxis(); }
|
||||
inline const Edge& edge() const { return eref.get().e; }
|
||||
inline Edge& edge() { return eref.get().e; }
|
||||
inline bool isTurningPoint() const {
|
||||
return eref.get().is_turning_point;
|
||||
}
|
||||
inline bool isFrom(const vector<MarkedEdgeRef>& cont ) {
|
||||
return &(container.get()) == &cont;
|
||||
}
|
||||
inline bool eq(const MarkedEdgeRef& mr) {
|
||||
return &(eref.get()) == &(mr.eref.get());
|
||||
}
|
||||
|
||||
MarkedEdgeRef(reference_wrapper<MarkedEdge> er,
|
||||
reference_wrapper<vector<MarkedEdgeRef>> ec):
|
||||
eref(er), container(ec), dir(1) {}
|
||||
|
||||
MarkedEdgeRef(reference_wrapper<MarkedEdge> er,
|
||||
reference_wrapper<vector<MarkedEdgeRef>> ec,
|
||||
Coord d):
|
||||
eref(er), container(ec), dir(d) {}
|
||||
};
|
||||
|
||||
using EdgeRefList = vector<MarkedEdgeRef>;
|
||||
|
||||
// Comparing two marked edges
|
||||
auto sortfn = [](const MarkedEdgeRef& e1, const MarkedEdgeRef& e2) {
|
||||
return e1.angleX() < e2.angleX();
|
||||
};
|
||||
|
||||
EdgeRefList Aref, Bref; // We create containers for the references
|
||||
Aref.reserve(A.size()); Bref.reserve(B.size());
|
||||
|
||||
// Fill reference container for the stationary polygon
|
||||
std::for_each(A.begin(), A.end(), [&Aref](MarkedEdge& me) {
|
||||
Aref.emplace_back( ref(me), ref(Aref) );
|
||||
});
|
||||
|
||||
// Fill reference container for the orbiting polygon
|
||||
std::for_each(B.begin(), B.end(), [&Bref](MarkedEdge& me) {
|
||||
Bref.emplace_back( ref(me), ref(Bref) );
|
||||
});
|
||||
|
||||
auto mink = [sortfn] // the Mink(Q, R, direction) sub-procedure
|
||||
(const EdgeRefList& Q, const EdgeRefList& R, bool positive)
|
||||
{
|
||||
|
||||
// Step 1 "merge sort_list(Q) and sort_list(R) to form merge_list(Q,R)"
|
||||
// Sort the containers of edge references and merge them.
|
||||
// Q could be sorted only once and be reused here but we would still
|
||||
// need to merge it with sorted(R).
|
||||
|
||||
EdgeRefList merged;
|
||||
EdgeRefList S, seq;
|
||||
merged.reserve(Q.size() + R.size());
|
||||
|
||||
merged.insert(merged.end(), R.begin(), R.end());
|
||||
std::stable_sort(merged.begin(), merged.end(), sortfn);
|
||||
merged.insert(merged.end(), Q.begin(), Q.end());
|
||||
std::stable_sort(merged.begin(), merged.end(), sortfn);
|
||||
|
||||
// Step 2 "set i = 1, k = 1, direction = 1, s1 = q1"
|
||||
// we don't use i, instead, q is an iterator into Q. k would be an index
|
||||
// into the merged sequence but we use "it" as an iterator for that
|
||||
|
||||
// here we obtain references for the containers for later comparisons
|
||||
const auto& Rcont = R.begin()->container.get();
|
||||
const auto& Qcont = Q.begin()->container.get();
|
||||
|
||||
// Set the initial direction
|
||||
Coord dir = 1;
|
||||
|
||||
// roughly i = 1 (so q = Q.begin()) and s1 = q1 so S[0] = q;
|
||||
if(positive) {
|
||||
auto q = Q.begin();
|
||||
S.emplace_back(*q);
|
||||
|
||||
// Roughly step 3
|
||||
|
||||
std::cout << "merged size: " << merged.size() << std::endl;
|
||||
auto mit = merged.begin();
|
||||
for(bool finish = false; !finish && q != Q.end();) {
|
||||
++q; // "Set i = i + 1"
|
||||
|
||||
while(!finish && mit != merged.end()) {
|
||||
if(mit->isFrom(Rcont)) {
|
||||
auto s = *mit;
|
||||
s.dir = dir;
|
||||
S.emplace_back(s);
|
||||
}
|
||||
|
||||
if(mit->eq(*q)) {
|
||||
S.emplace_back(*q);
|
||||
if(mit->isTurningPoint()) dir = -dir;
|
||||
if(q == Q.begin()) finish = true;
|
||||
break;
|
||||
}
|
||||
|
||||
mit += dir;
|
||||
// __nfp::advance(mit, merged, dir > 0);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
auto q = Q.rbegin();
|
||||
S.emplace_back(*q);
|
||||
|
||||
// Roughly step 3
|
||||
|
||||
std::cout << "merged size: " << merged.size() << std::endl;
|
||||
auto mit = merged.begin();
|
||||
for(bool finish = false; !finish && q != Q.rend();) {
|
||||
++q; // "Set i = i + 1"
|
||||
|
||||
while(!finish && mit != merged.end()) {
|
||||
if(mit->isFrom(Rcont)) {
|
||||
auto s = *mit;
|
||||
s.dir = dir;
|
||||
S.emplace_back(s);
|
||||
}
|
||||
|
||||
if(mit->eq(*q)) {
|
||||
S.emplace_back(*q);
|
||||
S.back().dir = -1;
|
||||
if(mit->isTurningPoint()) dir = -dir;
|
||||
if(q == Q.rbegin()) finish = true;
|
||||
break;
|
||||
}
|
||||
|
||||
mit += dir;
|
||||
// __nfp::advance(mit, merged, dir > 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Step 4:
|
||||
|
||||
// "Let starting edge r1 be in position si in sequence"
|
||||
// whaaat? I guess this means the following:
|
||||
auto it = S.begin();
|
||||
while(!it->eq(*R.begin())) ++it;
|
||||
|
||||
// "Set j = 1, next = 2, direction = 1, seq1 = si"
|
||||
// we don't use j, seq is expanded dynamically.
|
||||
dir = 1;
|
||||
auto next = std::next(R.begin()); seq.emplace_back(*it);
|
||||
|
||||
// Step 5:
|
||||
// "If all si edges have been allocated to seqj" should mean that
|
||||
// we loop until seq has equal size with S
|
||||
auto send = it; //it == S.begin() ? it : std::prev(it);
|
||||
while(it != S.end()) {
|
||||
++it; if(it == S.end()) it = S.begin();
|
||||
if(it == send) break;
|
||||
|
||||
if(it->isFrom(Qcont)) {
|
||||
seq.emplace_back(*it); // "If si is from Q, j = j + 1, seqj = si"
|
||||
|
||||
// "If si is a turning point in Q,
|
||||
// direction = - direction, next = next + direction"
|
||||
if(it->isTurningPoint()) {
|
||||
dir = -dir;
|
||||
next += dir;
|
||||
// __nfp::advance(next, R, dir > 0);
|
||||
}
|
||||
}
|
||||
|
||||
if(it->eq(*next) /*&& dir == next->dir*/) { // "If si = direction.rnext"
|
||||
// "j = j + 1, seqj = si, next = next + direction"
|
||||
seq.emplace_back(*it);
|
||||
next += dir;
|
||||
// __nfp::advance(next, R, dir > 0);
|
||||
}
|
||||
}
|
||||
|
||||
return seq;
|
||||
};
|
||||
|
||||
std::vector<EdgeRefList> seqlist;
|
||||
seqlist.reserve(Bref.size());
|
||||
|
||||
EdgeRefList Bslope = Bref; // copy Bref, we will make a slope diagram
|
||||
|
||||
// make the slope diagram of B
|
||||
std::sort(Bslope.begin(), Bslope.end(), sortfn);
|
||||
|
||||
auto slopeit = Bslope.begin(); // search for the first turning point
|
||||
while(!slopeit->isTurningPoint() && slopeit != Bslope.end()) slopeit++;
|
||||
|
||||
if(slopeit == Bslope.end()) {
|
||||
// no turning point means convex polygon.
|
||||
seqlist.emplace_back(mink(Aref, Bref, true));
|
||||
} else {
|
||||
int dir = 1;
|
||||
|
||||
auto firstturn = Bref.begin();
|
||||
while(!firstturn->eq(*slopeit)) ++firstturn;
|
||||
|
||||
assert(firstturn != Bref.end());
|
||||
|
||||
EdgeRefList bgroup; bgroup.reserve(Bref.size());
|
||||
bgroup.emplace_back(*slopeit);
|
||||
|
||||
auto b_it = std::next(firstturn);
|
||||
while(b_it != firstturn) {
|
||||
if(b_it == Bref.end()) b_it = Bref.begin();
|
||||
|
||||
while(!slopeit->eq(*b_it)) {
|
||||
__nfp::advance(slopeit, Bslope, dir > 0);
|
||||
}
|
||||
|
||||
if(!slopeit->isTurningPoint()) {
|
||||
bgroup.emplace_back(*slopeit);
|
||||
} else {
|
||||
if(!bgroup.empty()) {
|
||||
if(dir > 0) bgroup.emplace_back(*slopeit);
|
||||
for(auto& me : bgroup) {
|
||||
std::cout << me.eref.get().label << ", ";
|
||||
}
|
||||
std::cout << std::endl;
|
||||
seqlist.emplace_back(mink(Aref, bgroup, dir == 1 ? true : false));
|
||||
bgroup.clear();
|
||||
if(dir < 0) bgroup.emplace_back(*slopeit);
|
||||
} else {
|
||||
bgroup.emplace_back(*slopeit);
|
||||
}
|
||||
|
||||
dir *= -1;
|
||||
}
|
||||
++b_it;
|
||||
}
|
||||
}
|
||||
|
||||
// while(it != Bref.end()) // This is step 3 and step 4 in one loop
|
||||
// if(it->isTurningPoint()) {
|
||||
// R = {R.last, it++};
|
||||
// auto seq = mink(Q, R, orientation);
|
||||
|
||||
// // TODO step 6 (should be 5 shouldn't it?): linking edges from A
|
||||
// // I don't get this step
|
||||
|
||||
// seqlist.insert(seqlist.end(), seq.begin(), seq.end());
|
||||
// orientation = !orientation;
|
||||
// } else ++it;
|
||||
|
||||
// if(seqlist.empty()) seqlist = mink(Q, {Bref.begin(), Bref.end()}, true);
|
||||
|
||||
// /////////////////////////////////////////////////////////////////////////
|
||||
// Algorithm 2: breaking Minkowski sums into track line trips
|
||||
// /////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
// /////////////////////////////////////////////////////////////////////////
|
||||
// Algorithm 3: finding the boundary of the NFP from track line trips
|
||||
// /////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
for(auto& seq : seqlist) {
|
||||
std::cout << "seqlist size: " << seq.size() << std::endl;
|
||||
for(auto& s : seq) {
|
||||
std::cout << (s.dir > 0 ? "" : "-") << s.eref.get().label << ", ";
|
||||
}
|
||||
std::cout << std::endl;
|
||||
}
|
||||
|
||||
auto& seq = seqlist.front();
|
||||
RawShape rsh;
|
||||
Vertex top_nfp;
|
||||
std::vector<Edge> edgelist; edgelist.reserve(seq.size());
|
||||
for(auto& s : seq) {
|
||||
edgelist.emplace_back(s.eref.get().e);
|
||||
}
|
||||
|
||||
__nfp::buildPolygon(edgelist, rsh, top_nfp);
|
||||
|
||||
return Result(rsh, top_nfp);
|
||||
}
|
||||
|
||||
// Specializable NFP implementation class. Specialize it if you have a faster
|
||||
// or better NFP implementation
|
||||
template<class RawShape, NfpLevel nfptype>
|
||||
@ -793,8 +339,7 @@ inline NfpResult<RawShape> noFitPolygon(const RawShape& sh,
|
||||
return nfps(sh, other);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nfp
|
||||
} // namespace libnest2d
|
||||
|
||||
#endif // GEOMETRIES_NOFITPOLYGON_HPP
|
||||
|
@ -375,7 +375,7 @@ protected:
|
||||
sl::addVertex(rsh, item.vertex(static_cast<unsigned long>(i)));
|
||||
};
|
||||
|
||||
auto addOthers = [&addOthers_, &reverseAddOthers_]() {
|
||||
auto addOthers = [&]() {
|
||||
if constexpr (!is_clockwise<RawShape>())
|
||||
addOthers_();
|
||||
else
|
||||
@ -415,7 +415,6 @@ protected:
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
}} // namespace libnest2d::placers
|
||||
|
||||
#endif //BOTTOMLEFT_HPP
|
||||
|
@ -150,10 +150,8 @@ void AppConfig::set_defaults()
|
||||
if (get("order_volumes").empty())
|
||||
set("order_volumes", "1");
|
||||
|
||||
#if ENABLE_SHOW_NON_MANIFOLD_EDGES
|
||||
if (get("non_manifold_edges").empty())
|
||||
set("non_manifold_edges", "1");
|
||||
#endif // ENABLE_SHOW_NON_MANIFOLD_EDGES
|
||||
|
||||
if (get("clear_undo_redo_stack_on_new_project").empty())
|
||||
set("clear_undo_redo_stack_on_new_project", "1");
|
||||
|
@ -21,7 +21,7 @@ DistributedBeadingStrategy::DistributedBeadingStrategy(const coord_t optimal_wid
|
||||
name = "DistributedBeadingStrategy";
|
||||
}
|
||||
|
||||
DistributedBeadingStrategy::Beading DistributedBeadingStrategy::compute(coord_t thickness, coord_t bead_count) const
|
||||
DistributedBeadingStrategy::Beading DistributedBeadingStrategy::compute(const coord_t thickness, const coord_t bead_count) const
|
||||
{
|
||||
Beading ret;
|
||||
|
||||
@ -40,18 +40,24 @@ DistributedBeadingStrategy::Beading DistributedBeadingStrategy::compute(coord_t
|
||||
for (coord_t bead_idx = 0; bead_idx < bead_count; bead_idx++)
|
||||
weights[bead_idx] = getWeight(bead_idx);
|
||||
|
||||
const float total_weight = std::accumulate(weights.cbegin(), weights.cend(), 0.f);
|
||||
const float total_weight = std::accumulate(weights.cbegin(), weights.cend(), 0.f);
|
||||
coord_t accumulated_width = 0;
|
||||
for (coord_t bead_idx = 0; bead_idx < bead_count; bead_idx++) {
|
||||
const float weight_fraction = weights[bead_idx] / total_weight;
|
||||
const float weight_fraction = weights[bead_idx] / total_weight;
|
||||
const coord_t splitup_left_over_weight = to_be_divided * weight_fraction;
|
||||
const coord_t width = optimal_width + splitup_left_over_weight;
|
||||
const coord_t width = (bead_idx == bead_count - 1) ? thickness - accumulated_width : optimal_width + splitup_left_over_weight;
|
||||
|
||||
// Be aware that toolpath_locations is computed by dividing the width by 2, so toolpath_locations
|
||||
// could be off by 1 because of rounding errors.
|
||||
if (bead_idx == 0)
|
||||
ret.toolpath_locations.emplace_back(width / 2);
|
||||
else
|
||||
ret.toolpath_locations.emplace_back(ret.toolpath_locations.back() + (ret.bead_widths.back() + width) / 2);
|
||||
ret.bead_widths.emplace_back(width);
|
||||
accumulated_width += width;
|
||||
}
|
||||
ret.left_over = 0;
|
||||
assert((accumulated_width + ret.left_over) == thickness);
|
||||
} else if (bead_count == 2) {
|
||||
const coord_t outer_width = thickness / 2;
|
||||
ret.bead_widths.emplace_back(outer_width);
|
||||
@ -68,6 +74,13 @@ DistributedBeadingStrategy::Beading DistributedBeadingStrategy::compute(coord_t
|
||||
ret.left_over = thickness;
|
||||
}
|
||||
|
||||
assert(([&ret = std::as_const(ret), thickness]() -> bool {
|
||||
coord_t total_bead_width = 0;
|
||||
for (const coord_t &bead_width : ret.bead_widths)
|
||||
total_bead_width += bead_width;
|
||||
return (total_bead_width + ret.left_over) == thickness;
|
||||
}()));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include "Utils.hpp"
|
||||
#include "SVG.hpp"
|
||||
#include "Geometry/VoronoiVisualUtils.hpp"
|
||||
#include "Geometry/VoronoiUtilsCgal.hpp"
|
||||
#include "../EdgeGrid.hpp"
|
||||
|
||||
#define SKELETAL_TRAPEZOIDATION_BEAD_SEARCH_MAX 1000 //A limit to how long it'll keep searching for adjacent beads. Increasing will re-use beadings more often (saving performance), but search longer for beading (costing performance).
|
||||
@ -43,6 +44,71 @@ template<> struct segment_traits<Slic3r::Arachne::PolygonsSegmentIndex>
|
||||
namespace Slic3r::Arachne
|
||||
{
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
static void export_graph_to_svg(const std::string &path,
|
||||
SkeletalTrapezoidationGraph &graph,
|
||||
const Polygons &polys,
|
||||
const std::vector<std::shared_ptr<LineJunctions>> &edge_junctions = {},
|
||||
const bool beat_count = true,
|
||||
const bool transition_middles = true,
|
||||
const bool transition_ends = true)
|
||||
{
|
||||
const std::vector<std::string> colors = {"blue", "cyan", "red", "orange", "magenta", "pink", "purple", "green", "yellow"};
|
||||
coordf_t stroke_width = scale_(0.03);
|
||||
BoundingBox bbox = get_extents(polys);
|
||||
for (const auto &node : graph.nodes)
|
||||
bbox.merge(node.p);
|
||||
|
||||
bbox.offset(scale_(1.));
|
||||
|
||||
::Slic3r::SVG svg(path.c_str(), bbox);
|
||||
for (const auto &line : to_lines(polys))
|
||||
svg.draw(line, "gray", stroke_width);
|
||||
|
||||
for (const auto &edge : graph.edges)
|
||||
svg.draw(Line(edge.from->p, edge.to->p), (edge.data.centralIsSet() && edge.data.isCentral()) ? "blue" : "cyan", stroke_width);
|
||||
|
||||
for (const auto &line_junction : edge_junctions)
|
||||
for (const auto &extrusion_junction : *line_junction)
|
||||
svg.draw(extrusion_junction.p, "orange", coord_t(stroke_width * 2.));
|
||||
|
||||
if (beat_count) {
|
||||
for (const auto &node : graph.nodes) {
|
||||
svg.draw(node.p, "red", coord_t(stroke_width * 1.6));
|
||||
svg.draw_text(node.p, std::to_string(node.data.bead_count).c_str(), "black", 1);
|
||||
}
|
||||
}
|
||||
|
||||
if (transition_middles) {
|
||||
for (auto &edge : graph.edges) {
|
||||
if (std::shared_ptr<std::list<SkeletalTrapezoidationEdge::TransitionMiddle>> transitions = edge.data.getTransitions(); transitions) {
|
||||
for (auto &transition : *transitions) {
|
||||
Line edge_line = Line(edge.to->p, edge.from->p);
|
||||
double edge_length = edge_line.length();
|
||||
Point pt = edge_line.a + (edge_line.vector().cast<double>() * (double(transition.pos) / edge_length)).cast<coord_t>();
|
||||
svg.draw(pt, "magenta", coord_t(stroke_width * 1.5));
|
||||
svg.draw_text(pt, std::to_string(transition.lower_bead_count).c_str(), "black", 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (transition_ends) {
|
||||
for (auto &edge : graph.edges) {
|
||||
if (std::shared_ptr<std::list<SkeletalTrapezoidationEdge::TransitionEnd>> transitions = edge.data.getTransitionEnds(); transitions) {
|
||||
for (auto &transition : *transitions) {
|
||||
Line edge_line = Line(edge.to->p, edge.from->p);
|
||||
double edge_length = edge_line.length();
|
||||
Point pt = edge_line.a + (edge_line.vector().cast<double>() * (double(transition.pos) / edge_length)).cast<coord_t>();
|
||||
svg.draw(pt, transition.is_lower_end ? "green" : "lime", coord_t(stroke_width * 1.5));
|
||||
svg.draw_text(pt, std::to_string(transition.lower_bead_count).c_str(), "black", 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
SkeletalTrapezoidation::node_t& SkeletalTrapezoidation::makeNode(vd_t::vertex_type& vd_node, Point p)
|
||||
{
|
||||
auto he_node_it = vd_node_to_he_node.find(&vd_node);
|
||||
@ -285,7 +351,6 @@ std::vector<Point> SkeletalTrapezoidation::discretize(const vd_t::edge_type& vd_
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool SkeletalTrapezoidation::computePointCellRange(vd_t::cell_type& cell, Point& start_source_point, Point& end_source_point, vd_t::edge_type*& starting_vd_edge, vd_t::edge_type*& ending_vd_edge, const std::vector<Segment>& segments)
|
||||
{
|
||||
if (cell.incident_edge()->is_infinite())
|
||||
@ -386,7 +451,8 @@ SkeletalTrapezoidation::SkeletalTrapezoidation(const Polygons& polys, const Bead
|
||||
constructFromPolygons(polys);
|
||||
}
|
||||
|
||||
bool detect_missing_voronoi_vertex(const Geometry::VoronoiDiagram &voronoi_diagram, const std::vector<SkeletalTrapezoidation::Segment> &segments) {
|
||||
|
||||
static bool detect_missing_voronoi_vertex(const Geometry::VoronoiDiagram &voronoi_diagram, const std::vector<SkeletalTrapezoidation::Segment> &segments) {
|
||||
for (VoronoiUtils::vd_t::cell_type cell : voronoi_diagram.cells()) {
|
||||
if (!cell.incident_edge())
|
||||
continue; // There is no spoon
|
||||
@ -432,8 +498,64 @@ bool detect_missing_voronoi_vertex(const Geometry::VoronoiDiagram &voronoi_diagr
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool has_missing_twin_edge(const SkeletalTrapezoidationGraph &graph)
|
||||
{
|
||||
for (const auto &edge : graph.edges)
|
||||
if (edge.twin == nullptr)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
inline static std::unordered_map<Point, Point, PointHash> try_to_fix_degenerated_voronoi_diagram_by_rotation(
|
||||
Geometry::VoronoiDiagram &voronoi_diagram,
|
||||
const Polygons &polys,
|
||||
Polygons &polys_copy,
|
||||
std::vector<SkeletalTrapezoidation::Segment> &segments,
|
||||
const double fix_angle)
|
||||
{
|
||||
std::unordered_map<Point, Point, PointHash> vertex_mapping;
|
||||
for (Polygon &poly : polys_copy)
|
||||
poly.rotate(fix_angle);
|
||||
|
||||
assert(polys_copy.size() == polys.size());
|
||||
for (size_t poly_idx = 0; poly_idx < polys.size(); ++poly_idx) {
|
||||
assert(polys_copy[poly_idx].size() == polys[poly_idx].size());
|
||||
for (size_t point_idx = 0; point_idx < polys[poly_idx].size(); ++point_idx)
|
||||
vertex_mapping.insert({polys[poly_idx][point_idx], polys_copy[poly_idx][point_idx]});
|
||||
}
|
||||
|
||||
segments.clear();
|
||||
for (size_t poly_idx = 0; poly_idx < polys_copy.size(); poly_idx++)
|
||||
for (size_t point_idx = 0; point_idx < polys_copy[poly_idx].size(); point_idx++)
|
||||
segments.emplace_back(&polys_copy, poly_idx, point_idx);
|
||||
|
||||
voronoi_diagram.clear();
|
||||
construct_voronoi(segments.begin(), segments.end(), &voronoi_diagram);
|
||||
|
||||
assert(Geometry::VoronoiUtilsCgal::is_voronoi_diagram_planar_intersection(voronoi_diagram));
|
||||
|
||||
return vertex_mapping;
|
||||
}
|
||||
|
||||
inline static void rotate_back_skeletal_trapezoidation_graph_after_fix(SkeletalTrapezoidationGraph &graph,
|
||||
const double fix_angle,
|
||||
const std::unordered_map<Point, Point, PointHash> &vertex_mapping)
|
||||
{
|
||||
for (STHalfEdgeNode &node : graph.nodes) {
|
||||
// If a mapping exists between a rotated point and an original point, use this mapping. Otherwise, rotate a point in the opposite direction.
|
||||
if (auto node_it = vertex_mapping.find(node.p); node_it != vertex_mapping.end())
|
||||
node.p = node_it->second;
|
||||
else
|
||||
node.p.rotate(-fix_angle);
|
||||
}
|
||||
}
|
||||
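The mapping-based restore above matters because Slic3r points use scaled integer coordinates. A minimal sketch of the round-trip error, assuming Slic3r's Point type and hypothetical coordinates (not part of the diff):

    Point p{1000001, 333333};   // hypothetical scaled input vertex
    Point q = p;
    q.rotate(PI / 6.);          // rotate forward by fix_angle
    q.rotate(-PI / 6.);         // rotate back
    // q typically ends up a unit or two away from p because each rotation rounds to
    // integer coordinates; vertex_mapping therefore restores the input vertices exactly,
    // and only newly created Voronoi vertices fall back to the numeric back-rotation.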
|
||||
void SkeletalTrapezoidation::constructFromPolygons(const Polygons& polys)
|
||||
{
|
||||
#ifdef ARACHNE_DEBUG
|
||||
this->outline = polys;
|
||||
#endif
|
||||
|
||||
// Check self intersections.
|
||||
assert([&polys]() -> bool {
|
||||
EdgeGrid::Grid grid;
|
||||
@ -450,39 +572,61 @@ void SkeletalTrapezoidation::constructFromPolygons(const Polygons& polys)
|
||||
for (size_t point_idx = 0; point_idx < polys[poly_idx].size(); point_idx++)
|
||||
segments.emplace_back(&polys, poly_idx, point_idx);
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
{
|
||||
static int iRun = 0;
|
||||
BoundingBox bbox = get_extents(polys);
|
||||
SVG svg(debug_out_path("arachne_voronoi-input-%d.svg", iRun++).c_str(), bbox);
|
||||
svg.draw_outline(polys, "black", scaled<coordf_t>(0.03f));
|
||||
}
|
||||
#endif
|
||||
|
||||
Geometry::VoronoiDiagram voronoi_diagram;
|
||||
construct_voronoi(segments.begin(), segments.end(), &voronoi_diagram);
|
||||
|
||||
// Try to detect cases when some Voronoi vertex is missing.
|
||||
// When any Voronoi vertex is missing, rotate input polygon and try again.
|
||||
const bool has_missing_voronoi_vertex = detect_missing_voronoi_vertex(voronoi_diagram, segments);
|
||||
const double fix_angle = PI / 6;
|
||||
#ifdef ARACHNE_DEBUG_VORONOI
|
||||
{
|
||||
static int iRun = 0;
|
||||
dump_voronoi_to_svg(debug_out_path("arachne_voronoi-diagram-%d.svg", iRun++).c_str(), voronoi_diagram, to_points(polys), to_lines(polys));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
assert(Geometry::VoronoiUtilsCgal::is_voronoi_diagram_planar_intersection(voronoi_diagram));
|
||||
#endif
|
||||
|
||||
// Try to detect cases when some Voronoi vertex is missing and when
|
||||
// the Voronoi diagram is not planar.
|
||||
// When any Voronoi vertex is missing, or the Voronoi diagram is not
|
||||
// planar, rotate the input polygon and try again.
|
||||
const bool has_missing_voronoi_vertex = detect_missing_voronoi_vertex(voronoi_diagram, segments);
|
||||
// Detecting a non-planar Voronoi diagram catches at least GH issues #8474, #8514 and #8446.
|
||||
const bool is_voronoi_diagram_planar = Geometry::VoronoiUtilsCgal::is_voronoi_diagram_planar_angle(voronoi_diagram);
|
||||
const double fix_angle = PI / 6;
|
||||
|
||||
std::unordered_map<Point, Point, PointHash> vertex_mapping;
|
||||
// polys_copy is referenced through items stored in the std::vector segments.
|
||||
Polygons polys_copy = polys;
|
||||
if (has_missing_voronoi_vertex) {
|
||||
BOOST_LOG_TRIVIAL(debug) << "Detected missing Voronoi vertex, input polygons will be rotated back and forth.";
|
||||
for (Polygon &poly : polys_copy)
|
||||
poly.rotate(fix_angle);
|
||||
if (has_missing_voronoi_vertex || !is_voronoi_diagram_planar) {
|
||||
if (has_missing_voronoi_vertex)
|
||||
BOOST_LOG_TRIVIAL(warning) << "Detected missing Voronoi vertex, input polygons will be rotated back and forth.";
|
||||
else if (!is_voronoi_diagram_planar)
|
||||
BOOST_LOG_TRIVIAL(warning) << "Detected non-planar Voronoi diagram, input polygons will be rotated back and forth.";
|
||||
|
||||
assert(polys_copy.size() == polys.size());
|
||||
for (size_t poly_idx = 0; poly_idx < polys.size(); ++poly_idx) {
|
||||
assert(polys_copy[poly_idx].size() == polys[poly_idx].size());
|
||||
for (size_t point_idx = 0; point_idx < polys[poly_idx].size(); ++point_idx)
|
||||
vertex_mapping.insert({polys[poly_idx][point_idx], polys_copy[poly_idx][point_idx]});
|
||||
}
|
||||
vertex_mapping = try_to_fix_degenerated_voronoi_diagram_by_rotation(voronoi_diagram, polys, polys_copy, segments, fix_angle);
|
||||
|
||||
segments.clear();
|
||||
for (size_t poly_idx = 0; poly_idx < polys_copy.size(); poly_idx++)
|
||||
for (size_t point_idx = 0; point_idx < polys_copy[poly_idx].size(); point_idx++)
|
||||
segments.emplace_back(&polys_copy, poly_idx, point_idx);
|
||||
|
||||
voronoi_diagram.clear();
|
||||
construct_voronoi(segments.begin(), segments.end(), &voronoi_diagram);
|
||||
assert(!detect_missing_voronoi_vertex(voronoi_diagram, segments));
|
||||
assert(Geometry::VoronoiUtilsCgal::is_voronoi_diagram_planar_angle(voronoi_diagram));
|
||||
if (detect_missing_voronoi_vertex(voronoi_diagram, segments))
|
||||
BOOST_LOG_TRIVIAL(error) << "Detected missing Voronoi vertex even after the rotation of input.";
|
||||
else if (!Geometry::VoronoiUtilsCgal::is_voronoi_diagram_planar_angle(voronoi_diagram))
|
||||
BOOST_LOG_TRIVIAL(error) << "Detected non-planar Voronoi diagram even after the rotation of input.";
|
||||
}
|
||||
|
||||
bool degenerated_voronoi_diagram = has_missing_voronoi_vertex || !is_voronoi_diagram_planar;
|
||||
|
||||
process_voronoi_diagram:
|
||||
assert(this->graph.edges.empty() && this->graph.nodes.empty() && this->vd_edge_to_he_edge.empty() && this->vd_node_to_he_node.empty());
|
||||
for (vd_t::cell_type cell : voronoi_diagram.cells()) {
|
||||
if (!cell.incident_edge())
|
||||
continue; // There is no spoon
|
||||
@ -538,16 +682,39 @@ void SkeletalTrapezoidation::constructFromPolygons(const Polygons& polys)
|
||||
prev_edge->to->data.distance_to_boundary = 0;
|
||||
}
|
||||
|
||||
if (has_missing_voronoi_vertex) {
|
||||
for (node_t &node : graph.nodes) {
|
||||
// If a mapping exists between a rotated point and an original point, use this mapping. Otherwise, rotate a point in the opposite direction.
|
||||
if (auto node_it = vertex_mapping.find(node.p); node_it != vertex_mapping.end())
|
||||
node.p = node_it->second;
|
||||
else
|
||||
node.p.rotate(-fix_angle);
|
||||
}
|
||||
// For some input polygons, as in GH issues #8474 and #8514, the resulting Voronoi diagram is degenerated because it is not planar.
|
||||
// When this degenerated Voronoi diagram is processed, the resulting half-edge structure contains some edges that don't have
|
||||
// a twin edge. Based on this, we created a fast mechanism that detects those cases and tries to recompute the Voronoi
|
||||
// diagram on slightly rotated input polygons, which usually makes the Voronoi generator produce a non-degenerated Voronoi diagram.
|
||||
if (!degenerated_voronoi_diagram && has_missing_twin_edge(this->graph)) {
|
||||
BOOST_LOG_TRIVIAL(warning) << "Detected degenerated Voronoi diagram, input polygons will be rotated back and forth.";
|
||||
degenerated_voronoi_diagram = true;
|
||||
vertex_mapping = try_to_fix_degenerated_voronoi_diagram_by_rotation(voronoi_diagram, polys, polys_copy, segments, fix_angle);
|
||||
|
||||
assert(!detect_missing_voronoi_vertex(voronoi_diagram, segments));
|
||||
if (detect_missing_voronoi_vertex(voronoi_diagram, segments))
|
||||
BOOST_LOG_TRIVIAL(error) << "Detected missing Voronoi vertex after the rotation of input.";
|
||||
|
||||
assert(Geometry::VoronoiUtilsCgal::is_voronoi_diagram_planar_intersection(voronoi_diagram));
|
||||
|
||||
this->graph.edges.clear();
|
||||
this->graph.nodes.clear();
|
||||
this->vd_edge_to_he_edge.clear();
|
||||
this->vd_node_to_he_node.clear();
|
||||
|
||||
goto process_voronoi_diagram;
|
||||
}
|
||||
|
||||
if (degenerated_voronoi_diagram) {
|
||||
assert(!has_missing_twin_edge(this->graph));
|
||||
|
||||
if (has_missing_twin_edge(this->graph))
|
||||
BOOST_LOG_TRIVIAL(error) << "Detected degenerated Voronoi diagram even after the rotation of input.";
|
||||
}
|
||||
|
||||
if (degenerated_voronoi_diagram)
|
||||
rotate_back_skeletal_trapezoidation_graph_after_fix(this->graph, fix_angle, vertex_mapping);
|
||||
|
||||
separatePointyQuadEndNodes();
|
||||
|
||||
graph.collapseSmallEdges();
|
||||
@ -594,45 +761,62 @@ void SkeletalTrapezoidation::separatePointyQuadEndNodes()
|
||||
// vvvvvvvvvvvvvvvvvvvvv
|
||||
//
|
||||
|
||||
#if 0
|
||||
static void export_graph_to_svg(const std::string &path, const SkeletalTrapezoidationGraph &graph, const Polygons &polys)
|
||||
{
|
||||
const std::vector<std::string> colors = {"blue", "cyan", "red", "orange", "magenta", "pink", "purple", "green", "yellow"};
|
||||
coordf_t stroke_width = scale_(0.05);
|
||||
BoundingBox bbox;
|
||||
for (const auto &node : graph.nodes)
|
||||
bbox.merge(node.p);
|
||||
|
||||
bbox.offset(scale_(1.));
|
||||
::Slic3r::SVG svg(path.c_str(), bbox);
|
||||
for (const auto &line : to_lines(polys))
|
||||
svg.draw(line, "red", stroke_width);
|
||||
|
||||
for (const auto &edge : graph.edges)
|
||||
svg.draw(Line(edge.from->p, edge.to->p), "cyan", scale_(0.01));
|
||||
}
|
||||
#endif
|
||||
|
||||
void SkeletalTrapezoidation::generateToolpaths(std::vector<VariableWidthLines> &generated_toolpaths, bool filter_outermost_central_edges)
|
||||
{
|
||||
#ifdef ARACHNE_DEBUG
|
||||
static int iRun = 0;
|
||||
#endif
|
||||
|
||||
p_generated_toolpaths = &generated_toolpaths;
|
||||
|
||||
updateIsCentral();
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
export_graph_to_svg(debug_out_path("ST-updateIsCentral-final-%d.svg", iRun), this->graph, this->outline);
|
||||
#endif
|
||||
|
||||
filterCentral(central_filter_dist);
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
export_graph_to_svg(debug_out_path("ST-filterCentral-final-%d.svg", iRun), this->graph, this->outline);
|
||||
#endif
|
||||
|
||||
if (filter_outermost_central_edges)
|
||||
filterOuterCentral();
|
||||
|
||||
updateBeadCount();
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
export_graph_to_svg(debug_out_path("ST-updateBeadCount-final-%d.svg", iRun), this->graph, this->outline);
|
||||
#endif
|
||||
|
||||
filterNoncentralRegions();
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
export_graph_to_svg(debug_out_path("ST-filterNoncentralRegions-final-%d.svg", iRun), this->graph, this->outline);
|
||||
#endif
|
||||
|
||||
generateTransitioningRibs();
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
export_graph_to_svg(debug_out_path("ST-generateTransitioningRibs-final-%d.svg", iRun), this->graph, this->outline);
|
||||
#endif
|
||||
|
||||
generateExtraRibs();
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
export_graph_to_svg(debug_out_path("ST-generateExtraRibs-final-%d.svg", iRun), this->graph, this->outline);
|
||||
#endif
|
||||
|
||||
generateSegments();
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
export_graph_to_svg(debug_out_path("ST-generateSegments-final-%d.svg", iRun), this->graph, this->outline);
|
||||
#endif
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
++iRun;
|
||||
#endif
|
||||
}
|
||||
|
||||
void SkeletalTrapezoidation::updateIsCentral()
|
||||
@ -844,11 +1028,24 @@ void SkeletalTrapezoidation::generateTransitioningRibs()
|
||||
|
||||
filterTransitionMids();
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
static int iRun = 0;
|
||||
export_graph_to_svg(debug_out_path("ST-generateTransitioningRibs-mids-%d.svg", iRun++), this->graph, this->outline);
|
||||
#endif
|
||||
|
||||
ptr_vector_t<std::list<TransitionEnd>> edge_transition_ends; // We only map the half edge in the upward direction. Mapped items are not sorted.
|
||||
generateAllTransitionEnds(edge_transition_ends);
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
export_graph_to_svg(debug_out_path("ST-generateTransitioningRibs-ends-%d.svg", iRun++), this->graph, this->outline);
|
||||
#endif
|
||||
|
||||
applyTransitions(edge_transition_ends);
|
||||
// Note that the shared pointer lists will be out of scope and thus destroyed here, since the remaining refs are weak_ptr.
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
++iRun;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
@ -1568,17 +1765,38 @@ void SkeletalTrapezoidation::generateSegments()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
static int iRun = 0;
|
||||
export_graph_to_svg(debug_out_path("ST-generateSegments-before-propagation-%d.svg", iRun), this->graph, this->outline);
|
||||
#endif
|
||||
|
||||
propagateBeadingsUpward(upward_quad_mids, node_beadings);
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
export_graph_to_svg(debug_out_path("ST-generateSegments-upward-propagation-%d.svg", iRun), this->graph, this->outline);
|
||||
#endif
|
||||
|
||||
propagateBeadingsDownward(upward_quad_mids, node_beadings);
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
export_graph_to_svg(debug_out_path("ST-generateSegments-downward-propagation-%d.svg", iRun), this->graph, this->outline);
|
||||
#endif
|
||||
|
||||
ptr_vector_t<LineJunctions> edge_junctions; // junctions ordered high R to low R
|
||||
generateJunctions(node_beadings, edge_junctions);
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
export_graph_to_svg(debug_out_path("ST-generateSegments-junctions-%d.svg", iRun), this->graph, this->outline, edge_junctions);
|
||||
#endif
|
||||
|
||||
connectJunctions(edge_junctions);
|
||||
|
||||
|
||||
generateLocalMaximaSingleBeads();
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
++iRun;
|
||||
#endif
|
||||
}
|
||||
|
||||
SkeletalTrapezoidation::edge_t* SkeletalTrapezoidation::getQuadMaxRedgeTo(edge_t* quad_start_edge)
|
||||
@ -1811,7 +2029,10 @@ void SkeletalTrapezoidation::generateJunctions(ptr_vector_t<BeadingPropagation>&
|
||||
for (junction_idx = (std::max(size_t(1), beading->toolpath_locations.size()) - 1) / 2; junction_idx < num_junctions; junction_idx--)
|
||||
{
|
||||
coord_t bead_R = beading->toolpath_locations[junction_idx];
|
||||
if (bead_R <= start_R)
|
||||
// toolpath_locations computed inside DistributedBeadingStrategy could be off by 1 because of rounding errors.
|
||||
// In GH issue #8472, these rounding errors caused the middle extrusion to be missing.
|
||||
// Adding a small epsilon should help resolve those cases.
|
||||
if (bead_R <= start_R + 1)
|
||||
{ // Junction coinciding with start node is used in this function call
|
||||
break;
|
||||
}
|
||||
|
@ -18,6 +18,10 @@
|
||||
#include "SkeletalTrapezoidationJoint.hpp"
|
||||
#include "libslic3r/Arachne/BeadingStrategy/BeadingStrategy.hpp"
|
||||
#include "SkeletalTrapezoidationGraph.hpp"
|
||||
#include "../Geometry/Voronoi.hpp"
|
||||
|
||||
//#define ARACHNE_DEBUG
|
||||
//#define ARACHNE_DEBUG_VORONOI
|
||||
|
||||
namespace Slic3r::Arachne
|
||||
{
|
||||
@ -122,6 +126,10 @@ public:
|
||||
*/
|
||||
void generateToolpaths(std::vector<VariableWidthLines> &generated_toolpaths, bool filter_outermost_central_edges = false);
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
Polygons outline;
|
||||
#endif
|
||||
|
||||
protected:
|
||||
/*!
|
||||
* Auxiliary for referencing one transition along an edge which may contain multiple transitions
|
||||
|
@ -24,30 +24,37 @@ namespace Slic3r::Arachne
|
||||
{
|
||||
|
||||
WallToolPaths::WallToolPaths(const Polygons& outline, const coord_t bead_width_0, const coord_t bead_width_x,
|
||||
const size_t inset_count, const coord_t wall_0_inset, const PrintObjectConfig &print_object_config, const PrintConfig &print_config)
|
||||
const size_t inset_count, const coord_t wall_0_inset, const coordf_t layer_height,
|
||||
const PrintObjectConfig &print_object_config, const PrintConfig &print_config)
|
||||
: outline(outline)
|
||||
, bead_width_0(bead_width_0)
|
||||
, bead_width_x(bead_width_x)
|
||||
, inset_count(inset_count)
|
||||
, wall_0_inset(wall_0_inset)
|
||||
, layer_height(layer_height)
|
||||
, print_thin_walls(Slic3r::Arachne::fill_outline_gaps)
|
||||
, min_feature_size(scaled<coord_t>(print_object_config.min_feature_size.value))
|
||||
, min_bead_width(scaled<coord_t>(print_object_config.min_bead_width.value))
|
||||
, small_area_length(static_cast<double>(bead_width_0) / 2.)
|
||||
, wall_transition_filter_deviation(scaled<coord_t>(print_object_config.wall_transition_filter_deviation.value))
|
||||
, wall_transition_length(scaled<coord_t>(print_object_config.wall_transition_length.value))
|
||||
, toolpaths_generated(false)
|
||||
, print_object_config(print_object_config)
|
||||
{
|
||||
if (const auto &min_bead_width_opt = print_object_config.min_bead_width; min_bead_width_opt.percent) {
|
||||
assert(!print_config.nozzle_diameter.empty());
|
||||
double min_nozzle_diameter = *std::min_element(print_config.nozzle_diameter.values.begin(), print_config.nozzle_diameter.values.end());
|
||||
this->min_bead_width = scaled<coord_t>(min_bead_width_opt.value * 0.01 * min_nozzle_diameter);
|
||||
}
|
||||
assert(!print_config.nozzle_diameter.empty());
|
||||
this->min_nozzle_diameter = float(*std::min_element(print_config.nozzle_diameter.values.begin(), print_config.nozzle_diameter.values.end()));
|
||||
|
||||
if (const auto &wall_transition_filter_deviation_opt = print_object_config.wall_transition_filter_deviation; wall_transition_filter_deviation_opt.percent) {
|
||||
assert(!print_config.nozzle_diameter.empty());
|
||||
double min_nozzle_diameter = *std::min_element(print_config.nozzle_diameter.values.begin(), print_config.nozzle_diameter.values.end());
|
||||
this->wall_transition_filter_deviation = scaled<coord_t>(wall_transition_filter_deviation_opt.value * 0.01 * min_nozzle_diameter);
|
||||
}
|
||||
if (const auto &min_feature_size_opt = print_object_config.min_feature_size; min_feature_size_opt.percent)
|
||||
this->min_feature_size = scaled<coord_t>(min_feature_size_opt.value * 0.01 * this->min_nozzle_diameter);
|
||||
|
||||
if (const auto &min_bead_width_opt = print_object_config.min_bead_width; min_bead_width_opt.percent)
|
||||
this->min_bead_width = scaled<coord_t>(min_bead_width_opt.value * 0.01 * this->min_nozzle_diameter);
|
||||
|
||||
if (const auto &wall_transition_filter_deviation_opt = print_object_config.wall_transition_filter_deviation; wall_transition_filter_deviation_opt.percent)
|
||||
this->wall_transition_filter_deviation = scaled<coord_t>(wall_transition_filter_deviation_opt.value * 0.01 * this->min_nozzle_diameter);
|
||||
|
||||
if (const auto &wall_transition_length_opt = print_object_config.wall_transition_length; wall_transition_length_opt.percent)
|
||||
this->wall_transition_length = scaled<coord_t>(wall_transition_length_opt.value * 0.01 * this->min_nozzle_diameter);
|
||||
}
|
||||
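A quick worked example of the percent handling above, with hypothetical values: if min_bead_width is given as 85% and the smallest nozzle diameter is 0.4 mm, it resolves to 0.85 * 0.4 = 0.34 mm before being scaled to coord_t; the same rule applies to min_feature_size, wall_transition_filter_deviation and wall_transition_length, while absolute (non-percent) values are taken as given.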
|
||||
void simplify(Polygon &thiss, const int64_t smallest_line_segment_squared, const int64_t allowed_error_distance_squared)
|
||||
@ -322,60 +329,46 @@ void removeSmallAreas(Polygons &thiss, const double min_area_size, const bool re
|
||||
};
|
||||
|
||||
auto new_end = thiss.end();
|
||||
if(remove_holes)
|
||||
{
|
||||
for(auto it = thiss.begin(); it < new_end; it++)
|
||||
{
|
||||
// All polygons smaller than target are removed by replacing them with a polygon from the back of the vector
|
||||
if(fabs(ClipperLib::Area(to_path(*it))) < min_area_size)
|
||||
{
|
||||
new_end--;
|
||||
if (remove_holes) {
|
||||
for (auto it = thiss.begin(); it < new_end;) {
|
||||
// All polygons smaller than target are removed by replacing them with a polygon from the back of the vector.
|
||||
if (fabs(ClipperLib::Area(to_path(*it))) < min_area_size) {
|
||||
--new_end;
|
||||
*it = std::move(*new_end);
|
||||
it--; // wind back the iterator such that the polygon just swapped in is checked next
|
||||
continue; // Don't increment the iterator such that the polygon just swapped in is checked next.
|
||||
}
|
||||
++it;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
// For each polygon, computes the signed area, move small outlines at the end of the vector and keep pointer on small holes
|
||||
std::vector<Polygon> small_holes;
|
||||
for(auto it = thiss.begin(); it < new_end; it++) {
|
||||
double area = ClipperLib::Area(to_path(*it));
|
||||
if (fabs(area) < min_area_size)
|
||||
{
|
||||
if(area >= 0)
|
||||
{
|
||||
new_end--;
|
||||
if(it < new_end) {
|
||||
for (auto it = thiss.begin(); it < new_end;) {
|
||||
if (double area = ClipperLib::Area(to_path(*it)); fabs(area) < min_area_size) {
|
||||
if (area >= 0) {
|
||||
--new_end;
|
||||
if (it < new_end) {
|
||||
std::swap(*new_end, *it);
|
||||
it--;
|
||||
}
|
||||
else
|
||||
{ // Don't self-swap the last Path
|
||||
continue;
|
||||
} else { // Don't self-swap the last Path
|
||||
break;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
small_holes.push_back(*it);
|
||||
}
|
||||
}
|
||||
++it;
|
||||
}
|
||||
|
||||
// Removes small holes that have their first point inside one of the removed outlines
|
||||
// Iterating in reverse ensures that unprocessed small holes won't be moved
|
||||
const auto removed_outlines_start = new_end;
|
||||
for(auto hole_it = small_holes.rbegin(); hole_it < small_holes.rend(); hole_it++)
|
||||
{
|
||||
for(auto outline_it = removed_outlines_start; outline_it < thiss.end() ; outline_it++)
|
||||
{
|
||||
if(Polygon(*outline_it).contains(*hole_it->begin())) {
|
||||
for (auto hole_it = small_holes.rbegin(); hole_it < small_holes.rend(); hole_it++)
|
||||
for (auto outline_it = removed_outlines_start; outline_it < thiss.end(); outline_it++)
|
||||
if (Polygon(*outline_it).contains(*hole_it->begin())) {
|
||||
new_end--;
|
||||
*hole_it = std::move(*new_end);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
thiss.resize(new_end-thiss.begin());
|
||||
}
|
||||
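The rewritten loops above all use the same unordered swap-with-last removal to avoid shifting the vector on every erase. A generic sketch of that pattern, assuming only the standard library (not taken from the diff):

    #include <utility>
    #include <vector>

    // Remove every element for which pred returns true, without preserving element order.
    template<class T, class Pred>
    void swap_erase_if(std::vector<T> &v, Pred pred)
    {
        auto new_end = v.end();
        for (auto it = v.begin(); it < new_end;) {
            if (pred(*it)) {
                --new_end;
                if (it == new_end)
                    break;              // Don't self-swap the last element.
                *it = std::move(*new_end);
                continue;               // Re-check the element just moved in.
            }
            ++it;
        }
        v.erase(new_end, v.end());
    }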
@ -481,7 +474,7 @@ const std::vector<VariableWidthLines> &WallToolPaths::generate()
|
||||
// The functions above could produce intersecting polygons that could cause a crash inside Arachne.
|
||||
// Applying Clipper union should be enough to get rid of this issue.
|
||||
// Clipper union also fixed an issue in Arachne where, in the post-processed Voronoi diagram, some edges
|
||||
// didn't have twin edges (this probably isn't an issue in Boost Voronoi generator).
|
||||
// didn't have twin edges (a non-planar Voronoi diagram probably caused this).
|
||||
prepared_outline = union_(prepared_outline);
|
||||
|
||||
if (area(prepared_outline) <= 0) {
|
||||
@ -489,9 +482,12 @@ const std::vector<VariableWidthLines> &WallToolPaths::generate()
|
||||
return toolpaths;
|
||||
}
|
||||
|
||||
const coord_t wall_transition_length = scaled<coord_t>(this->print_object_config.wall_transition_length.value);
|
||||
const double wall_split_middle_threshold = this->print_object_config.wall_split_middle_threshold.value / 100.; // For an uneven nr. of lines: When to split the middle wall into two.
|
||||
const double wall_add_middle_threshold = this->print_object_config.wall_add_middle_threshold.value / 100.; // For an even nr. of lines: When to add a new middle in between the innermost two walls.
|
||||
const float external_perimeter_extrusion_width = Flow::rounded_rectangle_extrusion_width_from_spacing(unscale<float>(bead_width_0), float(this->layer_height));
|
||||
const float perimeter_extrusion_width = Flow::rounded_rectangle_extrusion_width_from_spacing(unscale<float>(bead_width_x), float(this->layer_height));
|
||||
|
||||
const double wall_split_middle_threshold = std::clamp(2. * unscaled<double>(this->min_bead_width) / external_perimeter_extrusion_width - 1., 0.01, 0.99); // For an uneven nr. of lines: When to split the middle wall into two.
|
||||
const double wall_add_middle_threshold = std::clamp(unscaled<double>(this->min_bead_width) / perimeter_extrusion_width, 0.01, 0.99); // For an even nr. of lines: When to add a new middle in between the innermost two walls.
|
||||
|
||||
const int wall_distribution_count = this->print_object_config.wall_distribution_count.value;
|
||||
const size_t max_bead_count = (inset_count < std::numeric_limits<coord_t>::max() / 2) ? 2 * inset_count : std::numeric_limits<coord_t>::max();
|
||||
const auto beading_strat = BeadingStrategyFactory::makeStrategy
|
||||
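A worked example of the two thresholds above, with hypothetical values: if min_bead_width resolves to 0.34 mm and both extrusion widths come out at 0.45 mm, then wall_split_middle_threshold = clamp(2 * 0.34 / 0.45 - 1, 0.01, 0.99) ≈ 0.51 and wall_add_middle_threshold = clamp(0.34 / 0.45, 0.01, 0.99) ≈ 0.76; the clamp keeps both ratios strictly between 0 and 1 regardless of the configured widths.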
@ -619,6 +615,14 @@ void WallToolPaths::stitchToolPaths(std::vector<VariableWidthLines> &toolpaths,
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
// PolylineStitcher, in some cases, produces closed extrusions (polygons),
|
||||
// but their endpoints differ by a small distance, so we reconnect them.
|
||||
// FIXME Lukas H.: Investigate more deeply why it is happening.
|
||||
if (wall_polygon.junctions.front().p != wall_polygon.junctions.back().p &&
|
||||
(wall_polygon.junctions.back().p - wall_polygon.junctions.front().p).cast<double>().norm() < stitch_distance) {
|
||||
wall_polygon.junctions.emplace_back(wall_polygon.junctions.front());
|
||||
}
|
||||
wall_polygon.is_closed = true;
|
||||
wall_lines.emplace_back(std::move(wall_polygon)); // add stitched polygons to result
|
||||
}
|
||||
|
@ -31,7 +31,7 @@ public:
|
||||
* \param inset_count The maximum number of parallel extrusion lines that make up the wall
|
||||
* \param wall_0_inset How far to inset the outer wall, to make it adhere better to other walls.
|
||||
*/
|
||||
WallToolPaths(const Polygons& outline, coord_t bead_width_0, coord_t bead_width_x, size_t inset_count, coord_t wall_0_inset, const PrintObjectConfig &print_object_config, const PrintConfig &print_config);
|
||||
WallToolPaths(const Polygons& outline, coord_t bead_width_0, coord_t bead_width_x, size_t inset_count, coord_t wall_0_inset, coordf_t layer_height, const PrintObjectConfig &print_object_config, const PrintConfig &print_config);
|
||||
|
||||
/*!
|
||||
* Generates the Toolpaths
|
||||
@ -110,14 +110,17 @@ private:
|
||||
coord_t bead_width_x; //<! The extrusion line width of the subsequent walls with which libArachne generates its walls; if WallToolPaths was called with the nominal_bead_width constructor, this is the same as bead_width_0
|
||||
size_t inset_count; //<! The maximum number of walls to generate
|
||||
coord_t wall_0_inset; //<! How far to inset the outer wall. Should only be applied when printing the actual walls, not extra infill/skin/support walls.
|
||||
coordf_t layer_height;
|
||||
bool print_thin_walls; //<! Whether to enable the widening beading meta-strategy for thin features
|
||||
coord_t min_feature_size; //<! The minimum size of the features that can be widened by the widening beading meta-strategy. Features thinner than that will not be printed
|
||||
coord_t min_bead_width; //<! The minimum bead size to use when widening thin model features with the widening beading meta-strategy
|
||||
double small_area_length; //<! The length of the small features which are to be filtered out; this is squared into a surface area
|
||||
coord_t wall_transition_filter_deviation; //!< The allowed line width deviation induced by filtering
|
||||
coord_t wall_transition_length;
|
||||
float min_nozzle_diameter;
|
||||
bool toolpaths_generated; //<! Are the toolpaths generated
|
||||
std::vector<VariableWidthLines> toolpaths; //<! The generated toolpaths
|
||||
Polygons inner_contour; //<! The inner contour of the generated toolpaths
|
||||
coord_t wall_transition_filter_deviation; //!< The allowed line width deviation induced by filtering
|
||||
const PrintObjectConfig &print_object_config;
|
||||
};
|
||||
|
||||
|
@ -268,13 +268,13 @@ void extrusion_paths_append(ExtrusionPaths &dst, const ClipperLib_Z::Paths &extr
|
||||
{
|
||||
for (const ClipperLib_Z::Path &extrusion_path : extrusion_paths) {
|
||||
ThickPolyline thick_polyline = Arachne::to_thick_polyline(extrusion_path);
|
||||
Slic3r::append(dst, thick_polyline_to_extrusion_paths(thick_polyline, role, flow, scaled<float>(0.05), SCALED_EPSILON));
|
||||
Slic3r::append(dst, thick_polyline_to_extrusion_paths(thick_polyline, role, flow, scaled<float>(0.05), float(SCALED_EPSILON)));
|
||||
}
|
||||
}
|
||||
|
||||
void extrusion_paths_append(ExtrusionPaths &dst, const Arachne::ExtrusionLine &extrusion, const ExtrusionRole role, const Flow &flow)
|
||||
{
|
||||
ThickPolyline thick_polyline = Arachne::to_thick_polyline(extrusion);
|
||||
Slic3r::append(dst, thick_polyline_to_extrusion_paths(thick_polyline, role, flow, scaled<float>(0.05), SCALED_EPSILON));
|
||||
Slic3r::append(dst, thick_polyline_to_extrusion_paths(thick_polyline, role, flow, scaled<float>(0.05), float(SCALED_EPSILON)));
|
||||
}
|
||||
} // namespace Slic3r
|
194
src/libslic3r/BranchingTree/BranchingTree.cpp
Normal file
@ -0,0 +1,194 @@
|
||||
#include "BranchingTree.hpp"
|
||||
#include "PointCloud.hpp"
|
||||
|
||||
#include <numeric>
|
||||
#include <optional>
|
||||
#include <algorithm>
|
||||
|
||||
#include "libslic3r/SLA/SupportTreeUtils.hpp"
|
||||
|
||||
namespace Slic3r { namespace branchingtree {
|
||||
|
||||
void build_tree(PointCloud &nodes, Builder &builder)
|
||||
{
|
||||
constexpr size_t initK = 5;
|
||||
|
||||
auto ptsqueue = nodes.start_queue();
|
||||
auto &properties = nodes.properties();
|
||||
|
||||
struct NodeDistance
|
||||
{
|
||||
size_t node_id = Node::ID_NONE;
|
||||
float dst_branching = NaNf;
|
||||
float dst_euql = NaNf;
|
||||
};
|
||||
auto distances = reserve_vector<NodeDistance>(initK);
|
||||
double prev_dist_max = 0.;
|
||||
size_t K = initK;
|
||||
bool routed = true;
|
||||
size_t node_id = Node::ID_NONE;
|
||||
|
||||
while ((!ptsqueue.empty() && builder.is_valid()) || !routed) {
|
||||
if (routed) {
|
||||
node_id = ptsqueue.top();
|
||||
ptsqueue.pop();
|
||||
}
|
||||
|
||||
Node node = nodes.get(node_id);
|
||||
nodes.mark_unreachable(node_id);
|
||||
|
||||
distances.clear();
|
||||
distances.reserve(K);
|
||||
|
||||
float dmax = 0.;
|
||||
nodes.foreach_reachable(
|
||||
node.pos,
|
||||
[&distances, &dmax](size_t id, float dst_branching, float dst_euql) {
|
||||
distances.emplace_back(NodeDistance{id, dst_branching, dst_euql});
|
||||
dmax = std::max(dmax, dst_euql);
|
||||
}, K, prev_dist_max);
|
||||
|
||||
std::sort(distances.begin(), distances.end(),
|
||||
[](auto &a, auto &b) { return a.dst_branching < b.dst_branching; });
|
||||
|
||||
if (distances.empty()) {
|
||||
builder.report_unroutable(node);
|
||||
K = initK;
|
||||
prev_dist_max = 0.;
|
||||
routed = true;
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
prev_dist_max = dmax;
|
||||
K *= 2;
|
||||
|
||||
auto closest_it = distances.begin();
|
||||
routed = false;
|
||||
while (closest_it != distances.end() && !routed && builder.is_valid()) {
|
||||
size_t closest_node_id = closest_it->node_id;
|
||||
Node closest_node = nodes.get(closest_node_id);
|
||||
|
||||
auto type = nodes.get_type(closest_node_id);
|
||||
float w = nodes.get(node_id).weight + closest_it->dst_branching;
|
||||
closest_node.Rmin = std::max(node.Rmin, closest_node.Rmin);
|
||||
|
||||
switch (type) {
|
||||
case BED: {
|
||||
closest_node.weight = w;
|
||||
if (closest_it->dst_branching > nodes.properties().max_branch_length()) {
|
||||
auto hl_br_len = float(nodes.properties().max_branch_length()) / 2.f;
|
||||
Node new_node {{node.pos.x(), node.pos.y(), node.pos.z() - hl_br_len}, node.Rmin};
|
||||
new_node.id = int(nodes.next_junction_id());
|
||||
new_node.weight = nodes.get(node_id).weight + hl_br_len;
|
||||
new_node.left = node.id;
|
||||
if ((routed = builder.add_bridge(node, new_node))) {
|
||||
size_t new_idx = nodes.insert_junction(new_node);
|
||||
ptsqueue.push(new_idx);
|
||||
}
|
||||
}
|
||||
else if ((routed = builder.add_ground_bridge(node, closest_node))) {
|
||||
closest_node.left = closest_node.right = node_id;
|
||||
nodes.get(closest_node_id) = closest_node;
|
||||
nodes.mark_unreachable(closest_node_id);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case MESH: {
|
||||
closest_node.weight = w;
|
||||
if ((routed = builder.add_mesh_bridge(node, closest_node))) {
|
||||
closest_node.left = closest_node.right = node_id;
|
||||
nodes.get(closest_node_id) = closest_node;
|
||||
nodes.mark_unreachable(closest_node_id);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case LEAF:
|
||||
case JUNCTION: {
|
||||
auto max_slope = float(properties.max_slope());
|
||||
|
||||
if (auto mergept = find_merge_pt(node.pos, closest_node.pos, max_slope)) {
|
||||
|
||||
float mergedist_closest = (*mergept - closest_node.pos).norm();
|
||||
float mergedist_node = (*mergept - node.pos).norm();
|
||||
float Wnode = nodes.get(node_id).weight;
|
||||
float Wclosest = nodes.get(closest_node_id).weight;
|
||||
float Wsum = std::max(Wnode, Wclosest);
|
||||
float distsum = std::max(mergedist_closest, mergedist_node);
|
||||
w = Wsum + distsum;
|
||||
|
||||
if (mergedist_closest > EPSILON && mergedist_node > EPSILON) {
|
||||
Node mergenode{*mergept, closest_node.Rmin};
|
||||
mergenode.weight = w;
|
||||
mergenode.id = int(nodes.next_junction_id());
|
||||
|
||||
if ((routed = builder.add_merger(node, closest_node, mergenode))) {
|
||||
mergenode.left = node_id;
|
||||
mergenode.right = closest_node_id;
|
||||
size_t new_idx = nodes.insert_junction(mergenode);
|
||||
ptsqueue.push(new_idx);
|
||||
size_t qid = nodes.get_queue_idx(closest_node_id);
|
||||
|
||||
if (qid != PointCloud::Unqueued)
|
||||
ptsqueue.remove(nodes.get_queue_idx(closest_node_id));
|
||||
|
||||
nodes.mark_unreachable(closest_node_id);
|
||||
}
|
||||
} else if (closest_node.pos.z() < node.pos.z() &&
|
||||
(closest_node.left == Node::ID_NONE ||
|
||||
closest_node.right == Node::ID_NONE)) {
|
||||
closest_node.weight = w;
|
||||
if ((routed = builder.add_bridge(node, closest_node))) {
|
||||
if (closest_node.left == Node::ID_NONE)
|
||||
closest_node.left = node_id;
|
||||
else if (closest_node.right == Node::ID_NONE)
|
||||
closest_node.right = node_id;
|
||||
|
||||
nodes.get(closest_node_id) = closest_node;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case NONE:;
|
||||
}
|
||||
|
||||
++closest_it;
|
||||
}
|
||||
|
||||
if (routed) {
|
||||
prev_dist_max = 0.;
|
||||
K = initK;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void build_tree(const indexed_triangle_set &its,
|
||||
const std::vector<Node> &support_roots,
|
||||
Builder &builder,
|
||||
const Properties &properties)
|
||||
{
|
||||
PointCloud nodes(its, support_roots, properties);
|
||||
|
||||
build_tree(nodes, builder);
|
||||
}
|
||||
|
||||
ExPolygon make_bed_poly(const indexed_triangle_set &its)
|
||||
{
|
||||
auto bb = bounding_box(its);
|
||||
|
||||
BoundingBox bbcrd{scaled(to_2d(bb.min)), scaled(to_2d(bb.max))};
|
||||
bbcrd.offset(scaled(10.));
|
||||
Point min = bbcrd.min, max = bbcrd.max;
|
||||
ExPolygon ret = {{min.x(), min.y()},
|
||||
{max.x(), min.y()},
|
||||
{max.x(), max.y()},
|
||||
{min.x(), max.y()}};
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
}} // namespace Slic3r::branchingtree
|
143
src/libslic3r/BranchingTree/BranchingTree.hpp
Normal file
@ -0,0 +1,143 @@
|
||||
#ifndef SUPPORTTREEBRANCHING_HPP
|
||||
#define SUPPORTTREEBRANCHING_HPP
|
||||
|
||||
// For indexed_triangle_set
|
||||
#include <admesh/stl.h>
|
||||
|
||||
#include "libslic3r/ExPolygon.hpp"
|
||||
#include "libslic3r/BoundingBox.hpp"
|
||||
|
||||
namespace Slic3r { namespace branchingtree {
|
||||
|
||||
// Branching tree input parameters. This is an in-line fillable structure with
|
||||
// setters returning self references.
|
||||
class Properties
|
||||
{
|
||||
double m_max_slope = PI / 4.;
|
||||
double m_ground_level = 0.;
|
||||
double m_sampling_radius = .5;
|
||||
double m_max_branch_len = 10.;
|
||||
|
||||
ExPolygons m_bed_shape;
|
||||
|
||||
public:
|
||||
// Maximum slope for bridges of the tree
|
||||
Properties &max_slope(double val) noexcept
|
||||
{
|
||||
m_max_slope = val;
|
||||
return *this;
|
||||
}
|
||||
// Z level of the ground
|
||||
Properties &ground_level(double val) noexcept
|
||||
{
|
||||
m_ground_level = val;
|
||||
return *this;
|
||||
}
|
||||
// How far should sample points be in the mesh and the ground
|
||||
Properties &sampling_radius(double val) noexcept
|
||||
{
|
||||
m_sampling_radius = val;
|
||||
return *this;
|
||||
}
|
||||
// Shape of the print bed (ground)
|
||||
Properties &bed_shape(ExPolygons bed) noexcept
|
||||
{
|
||||
m_bed_shape = std::move(bed);
|
||||
return *this;
|
||||
}
|
||||
|
||||
Properties &max_branch_length(double val) noexcept
|
||||
{
|
||||
m_max_branch_len = val;
|
||||
return *this;
|
||||
}
|
||||
|
||||
double max_slope() const noexcept { return m_max_slope; }
|
||||
double ground_level() const noexcept { return m_ground_level; }
|
||||
double sampling_radius() const noexcept { return m_sampling_radius; }
|
||||
double max_branch_length() const noexcept { return m_max_branch_len; }
|
||||
const ExPolygons &bed_shape() const noexcept { return m_bed_shape; }
|
||||
};
|
||||
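Because every setter returns *this, the structure can be filled in line. A minimal usage sketch with illustrative values and a hypothetical, caller-prepared `bed` ExPolygons variable:

    branchingtree::Properties props;
    props.max_slope(PI / 4.)         // 45 degree bridges
         .ground_level(0.)
         .sampling_radius(.5)
         .max_branch_length(10.)
         .bed_shape(std::move(bed)); // bed: ExPolygons prepared by the caller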
|
||||
// A junction of the branching tree with position and radius.
|
||||
struct Node
|
||||
{
|
||||
static constexpr int ID_NONE = -1;
|
||||
|
||||
int id = ID_NONE, left = ID_NONE, right = ID_NONE;
|
||||
|
||||
Vec3f pos;
|
||||
float Rmin = 0.f;
|
||||
|
||||
// Tracking the weight of each junction, which is essentially the sum of
|
||||
// the lengths of all branches emanating from this junction.
|
||||
float weight = 0.f;
|
||||
|
||||
Node(const Vec3f &p, float r_min = .0f) : pos{p}, Rmin{r_min}, weight{0.f}
|
||||
{}
|
||||
};
|
||||
|
||||
// An output interface for the branching tree generator function. Consider each
|
||||
// method as a callback and implement the actions that need to be done.
|
||||
class Builder
|
||||
{
|
||||
public:
|
||||
virtual ~Builder() = default;
|
||||
|
||||
// A simple bridge from junction to junction.
|
||||
virtual bool add_bridge(const Node &from, const Node &to) = 0;
|
||||
|
||||
// A Y-shaped structure with two starting points and a merge point below
|
||||
// them. The angles will respect the max_slope setting.
|
||||
virtual bool add_merger(const Node &node,
|
||||
const Node &closest,
|
||||
const Node &merge_node) = 0;
|
||||
|
||||
// Add an anchor bridge to the ground (print bed)
|
||||
virtual bool add_ground_bridge(const Node &from,
|
||||
const Node &to) = 0;
|
||||
|
||||
// Add an anchor bridge to the model body
|
||||
virtual bool add_mesh_bridge(const Node &from, const Node &to) = 0;
|
||||
|
||||
// Report nodes that can not be routed to an endpoint (model or ground)
|
||||
virtual void report_unroutable(const Node &j) = 0;
|
||||
|
||||
// If returns false, the tree building process shall stop
|
||||
virtual bool is_valid() const { return true; }
|
||||
};
|
||||
|
||||
// Build the actual tree.
|
||||
// its: The input mesh
|
||||
// support_leafs: The input support points
|
||||
// builder: The output interface, describes how to build the tree
|
||||
// properties: Parameters of the tree
|
||||
//
|
||||
// Notes:
|
||||
// The original algorithm implicitly ensures that the generated tree avoids
|
||||
// the model body. This implementation uses point sampling of the mesh, so an
|
||||
// explicit check is needed that the part of the tree being inserted properly
|
||||
// avoids the model. This can be done in the builder implementation. Each
|
||||
// method can return a boolean indicating whether the given branch can or
|
||||
// cannot be inserted. If a particular path is unavailable, the algorithm
|
||||
// will try a few other paths as well. If all of them fail, the
|
||||
// report_unroutable method will be called as a last resort.
|
||||
void build_tree(const indexed_triangle_set &its,
|
||||
const std::vector<Node> &support_leafs,
|
||||
Builder &builder,
|
||||
const Properties &properties = {});
|
||||
|
||||
inline void build_tree(const indexed_triangle_set &its,
|
||||
const std::vector<Node> &support_leafs,
|
||||
Builder &&builder,
|
||||
const Properties &properties = {})
|
||||
{
|
||||
build_tree(its, support_leafs, builder, properties);
|
||||
}
|
||||
|
||||
// Helper function to derive a bed polygon only from the model bounding box.
|
||||
ExPolygon make_bed_poly(const indexed_triangle_set &its);
|
||||
|
||||
}} // namespace Slic3r::branchingtree
|
||||
|
||||
#endif // SUPPORTTREEBRANCHING_HPP
|
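To make the callback interface above concrete, here is a minimal sketch of a Builder implementation that merely records the produced branches; it is not part of the diff, and the class and member names are hypothetical:

    #include <utility>
    #include <vector>
    #include "BranchingTree.hpp"

    using Slic3r::branchingtree::Node;

    class CollectingBuilder : public Slic3r::branchingtree::Builder {
        std::vector<std::pair<Node, Node>> m_branches; // recorded (from, to) pairs
    public:
        bool add_bridge(const Node &from, const Node &to) override
        { m_branches.emplace_back(from, to); return true; }
        bool add_merger(const Node &node, const Node &closest, const Node &merge_node) override
        { m_branches.emplace_back(node, merge_node); m_branches.emplace_back(closest, merge_node); return true; }
        bool add_ground_bridge(const Node &from, const Node &to) override
        { m_branches.emplace_back(from, to); return true; }
        bool add_mesh_bridge(const Node &from, const Node &to) override
        { m_branches.emplace_back(from, to); return true; }
        void report_unroutable(const Node &) override { /* count or log unsupported leafs */ }
    };

    // Usage sketch: Slic3r::branchingtree::build_tree(its, support_leafs, CollectingBuilder{}, props);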
252
src/libslic3r/BranchingTree/PointCloud.cpp
Normal file
@ -0,0 +1,252 @@
|
||||
#include "PointCloud.hpp"
|
||||
|
||||
#include "libslic3r/Geometry.hpp"
|
||||
#include "libslic3r/Tesselate.hpp"
|
||||
|
||||
#include <igl/random_points_on_mesh.h>
|
||||
|
||||
namespace Slic3r { namespace branchingtree {
|
||||
|
||||
std::optional<Vec3f> find_merge_pt(const Vec3f &A,
|
||||
const Vec3f &B,
|
||||
float critical_angle)
|
||||
{
|
||||
// The idea is that A and B both have their support cones. But searching
|
||||
// for the intersection of these support cones is difficult and it's enough
|
||||
// to reduce this problem to 2D and search for the intersection of two
|
||||
// rays that merge somewhere between A and B. The 2D plane is a vertical
|
||||
// slice of the 3D scene where the 2D Y axis is equal to the 3D Z axis and
|
||||
// the 2D X axis is determined by the XY direction of the AB vector.
|
||||
//
|
||||
// Z^
|
||||
// | A *
|
||||
// | . . B *
|
||||
// | . . . .
|
||||
// | . . . .
|
||||
// | . x .
|
||||
// -------------------> XY
|
||||
|
||||
// Determine the transformation matrix for the 2D projection:
|
||||
Vec3f diff = {B.x() - A.x(), B.y() - A.y(), 0.f};
|
||||
Vec3f dir = diff.normalized(); // TODO: avoid normalization
|
||||
|
||||
Eigen::Matrix<float, 2, 3> tr2D;
|
||||
tr2D.row(0) = Vec3f{dir.x(), dir.y(), dir.z()};
|
||||
tr2D.row(1) = Vec3f{0.f, 0.f, 1.f};
|
||||
|
||||
// Transform the 2 vectors A and B into 2D vector 'a' and 'b'. Here we can
|
||||
// omit 'a', pretend that its the origin and use BA as the vector b.
|
||||
Vec2f b = tr2D * (B - A);
|
||||
|
||||
// Get the square sine of the ray emanating from 'a' towards 'b'. This ray might
|
||||
// exceed the allowed angle but that is corrected subsequently.
|
||||
// The sign of the original sine is also needed, hence b.y is multiplied by
|
||||
// abs(b.y)
|
||||
float b_sqn = b.squaredNorm();
|
||||
float sin2sig_a = b_sqn > EPSILON ? (b.y() * std::abs(b.y())) / b_sqn : 0.f;
|
||||
|
||||
// sine2 from 'b' to 'a' is the opposite of sine2 from a to b
|
||||
float sin2sig_b = -sin2sig_a;
|
||||
|
||||
// Derive the allowed angles from the given critical angle.
|
||||
// critical_angle is measured from the horizontal X axis.
|
||||
// The rays need to go downwards which corresponds to negative angles
|
||||
|
||||
float sincrit = std::sin(critical_angle); // sine of the critical angle
|
||||
float sin2crit = -sincrit * sincrit; // signed sine squared
|
||||
sin2sig_a = std::min(sin2sig_a, sin2crit); // Do the angle saturation of both rays
|
||||
sin2sig_b = std::min(sin2sig_b, sin2crit); //
|
||||
float sin2_a = std::abs(sin2sig_a); // Get the unsigned sine and cosine squared values
|
||||
float sin2_b = std::abs(sin2sig_b);
|
||||
float cos2_a = 1.f - sin2_a;
|
||||
float cos2_b = 1.f - sin2_b;
|
||||
|
||||
// Derive the new direction vectors. This is by square rooting the sin2
|
||||
// and cos2 values and restoring the original signs
|
||||
Vec2f Da = {std::copysign(std::sqrt(cos2_a), b.x()), std::copysign(std::sqrt(sin2_a), sin2sig_a)};
|
||||
Vec2f Db = {-std::copysign(std::sqrt(cos2_b), b.x()), std::copysign(std::sqrt(sin2_b), sin2sig_b)};
|
||||
|
||||
// Determine where two rays ([0, 0], Da), (b, Db) intersect.
|
||||
// Based on
|
||||
// https://stackoverflow.com/questions/27459080/given-two-points-and-two-direction-vectors-find-the-point-where-they-intersect
|
||||
// One ray is emanating from (0, 0) so the formula is simplified
|
||||
double t1 = (Db.y() * b.x() - b.y() * Db.x()) /
|
||||
(Da.x() * Db.y() - Da.y() * Db.x());
|
||||
|
||||
Vec2f mp = t1 * Da;
|
||||
Vec3f Mp = A + tr2D.transpose() * mp;
|
||||
|
||||
return t1 >= 0.f ? Mp : Vec3f{};
|
||||
}
|
||||
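A small sanity-check sketch for the function above, using hypothetical inputs: two tips at the same height, 4 mm apart, with a 45 degree critical angle. If the 2D projection is read correctly, both rays descend at 45 degrees and should meet halfway, about 2 mm below the tips:

    // Assumes the Slic3r namespace plus <iostream> and <optional> are available.
    Vec3f A{0.f, 0.f, 10.f}, B{4.f, 0.f, 10.f};
    if (std::optional<Vec3f> mp = branchingtree::find_merge_pt(A, B, float(PI / 4.)))
        std::cout << mp->transpose() << std::endl; // expected to land near (2, 0, 8)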
|
||||
void to_eigen_mesh(const indexed_triangle_set &its,
|
||||
Eigen::MatrixXd &V,
|
||||
Eigen::MatrixXi &F)
|
||||
{
|
||||
V.resize(its.vertices.size(), 3);
|
||||
F.resize(its.indices.size(), 3);
|
||||
for (unsigned int i = 0; i < its.indices.size(); ++i)
|
||||
F.row(i) = its.indices[i];
|
||||
|
||||
for (unsigned int i = 0; i < its.vertices.size(); ++i)
|
||||
V.row(i) = its.vertices[i].cast<double>();
|
||||
}
|
||||
|
||||
std::vector<Node> sample_mesh(const indexed_triangle_set &its, double radius)
|
||||
{
|
||||
std::vector<Node> ret;
|
||||
|
||||
double surface_area = 0.;
|
||||
for (const Vec3i &face : its.indices) {
|
||||
std::array<Vec3f, 3> tri = {its.vertices[face(0)],
|
||||
its.vertices[face(1)],
|
||||
its.vertices[face(2)]};
|
||||
|
||||
auto U = tri[1] - tri[0], V = tri[2] - tri[0];
|
||||
surface_area += 0.5 * U.cross(V).norm();
|
||||
}
|
||||
|
||||
int N = surface_area / (PI * radius * radius);
|
||||
|
||||
Eigen::MatrixXd B;
|
||||
Eigen::MatrixXi FI;
|
||||
Eigen::MatrixXd V;
|
||||
Eigen::MatrixXi F;
|
||||
to_eigen_mesh(its, V, F);
|
||||
igl::random_points_on_mesh(N, V, F, B, FI);
|
||||
|
||||
ret.reserve(size_t(N));
|
||||
for (int i = 0; i < FI.size(); i++) {
|
||||
int face_id = FI(i);
|
||||
|
||||
if (face_id < 0 || face_id >= int(its.indices.size()))
|
||||
continue;
|
||||
|
||||
Vec3i face = its.indices[face_id];
|
||||
|
||||
if (face(0) >= int(its.vertices.size()) ||
|
||||
face(1) >= int(its.vertices.size()) ||
|
||||
face(2) >= int(its.vertices.size()))
|
||||
continue;
|
||||
|
||||
Vec3f c = B.row(i)(0) * its.vertices[face(0)] +
|
||||
B.row(i)(1) * its.vertices[face(1)] +
|
||||
B.row(i)(2) * its.vertices[face(2)];
|
||||
|
||||
ret.emplace_back(c);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::vector<Node> sample_bed(const ExPolygons &bed, float z, double radius)
|
||||
{
|
||||
std::vector<Vec3f> ret;
|
||||
|
||||
auto triangles = triangulate_expolygons_3d(bed, z);
|
||||
indexed_triangle_set its;
|
||||
its.vertices.reserve(triangles.size());
|
||||
|
||||
for (size_t i = 0; i < triangles.size(); i += 3) {
|
||||
its.vertices.emplace_back(triangles[i].cast<float>());
|
||||
its.vertices.emplace_back(triangles[i + 1].cast<float>());
|
||||
its.vertices.emplace_back(triangles[i + 2].cast<float>());
|
||||
|
||||
its.indices.emplace_back(i, i + 1, i + 2);
|
||||
}
|
||||
|
||||
return sample_mesh(its, radius);
|
||||
}
|
||||
|
||||
PointCloud::PointCloud(const indexed_triangle_set &M,
|
||||
std::vector<Node> support_leafs,
|
||||
const Properties &props)
|
||||
: PointCloud{sample_mesh(M, props.sampling_radius()),
|
||||
sample_bed(props.bed_shape(),
|
||||
props.ground_level(),
|
||||
props.sampling_radius()),
|
||||
std::move(support_leafs), props}
|
||||
{}
|
||||
|
||||
PointCloud::PointCloud(std::vector<Node> meshpts,
|
||||
std::vector<Node> bedpts,
|
||||
std::vector<Node> support_leafs,
|
||||
const Properties &props)
|
||||
: m_leafs{std::move(support_leafs)}
|
||||
, m_meshpoints{std::move(meshpts)}
|
||||
, m_bedpoints{std::move(bedpts)}
|
||||
, m_props{props}
|
||||
, cos2bridge_slope{std::cos(props.max_slope()) *
|
||||
std::abs(std::cos(props.max_slope()))}
|
||||
, MESHPTS_BEGIN{m_bedpoints.size()}
|
||||
, LEAFS_BEGIN{MESHPTS_BEGIN + m_meshpoints.size()}
|
||||
, JUNCTIONS_BEGIN{LEAFS_BEGIN + m_leafs.size()}
|
||||
, m_searchable_indices(JUNCTIONS_BEGIN + m_junctions.size(), true)
|
||||
, m_queue_indices(JUNCTIONS_BEGIN + m_junctions.size(), Unqueued)
|
||||
, m_reachable_cnt{JUNCTIONS_BEGIN + m_junctions.size()}
|
||||
{
|
||||
for (size_t i = 0; i < m_bedpoints.size(); ++i) {
|
||||
m_bedpoints[i].id = int(i);
|
||||
m_ktree.insert({m_bedpoints[i].pos, i});
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < m_meshpoints.size(); ++i) {
|
||||
Node &n = m_meshpoints[i];
|
||||
n.id = int(MESHPTS_BEGIN + i);
|
||||
m_ktree.insert({n.pos, n.id});
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < m_leafs.size(); ++i) {
|
||||
Node &n = m_leafs[i];
|
||||
n.id = int(LEAFS_BEGIN + i);
|
||||
m_ktree.insert({n.pos, n.id});
|
||||
}
|
||||
}
|
||||
|
||||
float PointCloud::get_distance(const Vec3f &p, size_t node_id) const
|
||||
{
|
||||
auto t = get_type(node_id);
|
||||
auto ret = std::numeric_limits<float>::infinity();
|
||||
const auto &node = get(node_id);
|
||||
|
||||
switch (t) {
|
||||
case MESH:
|
||||
case BED: {
|
||||
// Points of mesh or bed which are outside of the support cone of
|
||||
// 'pos' must be discarded.
|
||||
if (is_outside_support_cone(p, node.pos))
|
||||
ret = std::numeric_limits<float>::infinity();
|
||||
else
|
||||
ret = (node.pos - p).norm();
|
||||
|
||||
break;
|
||||
}
|
||||
case LEAF:
|
||||
case JUNCTION:{
|
||||
auto mergept = find_merge_pt(p, node.pos, m_props.max_slope());
|
||||
double maxL2 = m_props.max_branch_length() * m_props.max_branch_length();
|
||||
|
||||
if (!mergept || mergept->z() < (m_props.ground_level() + 2 * node.Rmin))
|
||||
ret = std::numeric_limits<float>::infinity();
|
||||
else if (double a = (node.pos - *mergept).squaredNorm(),
|
||||
b = (p - *mergept).squaredNorm();
|
||||
a < maxL2 && b < maxL2)
|
||||
ret = std::sqrt(b);
|
||||
|
||||
break;
|
||||
}
|
||||
case NONE:
|
||||
;
|
||||
}
|
||||
|
||||
// Setting the ret val to infinity will effectively discard this
|
||||
// connection of nodes. max_branch_length property is used here
|
||||
// to discard node=>node and node=>mesh connections longer than this
|
||||
// property.
|
||||
if (t != BED && ret > m_props.max_branch_length())
|
||||
ret = std::numeric_limits<float>::infinity();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
}} // namespace Slic3r::branchingtree
|
272
src/libslic3r/BranchingTree/PointCloud.hpp
Normal file
@ -0,0 +1,272 @@
|
||||
#ifndef POINTCLOUD_HPP
|
||||
#define POINTCLOUD_HPP
|
||||
|
||||
#include <optional>
|
||||
|
||||
#include "BranchingTree.hpp"
|
||||
|
||||
#include "libslic3r/Execution/Execution.hpp"
|
||||
#include "libslic3r/MutablePriorityQueue.hpp"
|
||||
|
||||
#include "libslic3r/BoostAdapter.hpp"
|
||||
#include "boost/geometry/index/rtree.hpp"
|
||||
|
||||
namespace Slic3r { namespace branchingtree {
|
||||
|
||||
std::optional<Vec3f> find_merge_pt(const Vec3f &A,
|
||||
const Vec3f &B,
|
||||
float max_slope);
|
||||
|
||||
void to_eigen_mesh(const indexed_triangle_set &its,
|
||||
Eigen::MatrixXd &V,
|
||||
Eigen::MatrixXi &F);
|
||||
|
||||
std::vector<Node> sample_mesh(const indexed_triangle_set &its, double radius);
|
||||
|
||||
std::vector<Node> sample_bed(const ExPolygons &bed,
|
||||
float z,
|
||||
double radius = 1.);
|
||||
|
||||
enum PtType { LEAF, MESH, BED, JUNCTION, NONE };
|
||||
|
||||
inline BoundingBox3Base<Vec3f> get_support_cone_bb(const Vec3f &p, const Properties &props)
|
||||
{
|
||||
double gnd = props.ground_level() - EPSILON;
|
||||
double h = p.z() - gnd;
|
||||
double phi = PI / 2 - props.max_slope();
|
||||
auto r = float(std::min(h * std::tan(phi), props.max_branch_length() * std::sin(phi)));
|
||||
|
||||
Vec3f bb_min = {p.x() - r, p.y() - r, float(gnd)};
|
||||
Vec3f bb_max = {p.x() + r, p.y() + r, p.z()};
|
||||
|
||||
return {bb_min, bb_max};
|
||||
}
|
||||
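A worked example of the radius formula above, with illustrative values: for the default max_slope of PI / 4 the half-opening angle is phi = PI / 2 - PI / 4 = PI / 4, so with p.z() sitting 5 mm above the ground and max_branch_length = 10 mm the radius is r = min(5 * tan(PI / 4), 10 * sin(PI / 4)) = min(5.0, 7.07) = 5 mm, and the returned box spans [p.x() - 5, p.x() + 5] x [p.y() - 5, p.y() + 5] horizontally and [ground, p.z()] vertically.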
|
||||
// A cloud of points including support points, mesh points, junction points
|
||||
// and anchor points on the bed. Junction points can be added or removed, all
|
||||
// the other point types are established on creation and remain unchangeable.
|
||||
class PointCloud {
|
||||
std::vector<Node> m_leafs, m_junctions, m_meshpoints, m_bedpoints;
|
||||
|
||||
const branchingtree::Properties &m_props;
|
||||
|
||||
const double cos2bridge_slope;
|
||||
const size_t MESHPTS_BEGIN, LEAFS_BEGIN, JUNCTIONS_BEGIN;
|
||||
|
||||
private:
|
||||
|
||||
// These vectors have one element per node index, to keep
|
||||
// access complexity constant. WARN: there might be cache non-locality costs
|
||||
std::vector<bool> m_searchable_indices; // searchable flag value of a node
|
||||
std::vector<size_t> m_queue_indices; // queue id of a node if queued
|
||||
|
||||
size_t m_reachable_cnt;
|
||||
|
||||
struct CoordFn
|
||||
{
|
||||
const PointCloud *self;
|
||||
CoordFn(const PointCloud *s) : self{s} {}
|
||||
float operator()(size_t nodeid, size_t dim) const
|
||||
{
|
||||
return self->get(nodeid).pos(int(dim));
|
||||
}
|
||||
};
|
||||
|
||||
using PointIndexEl = std::pair<Vec3f, unsigned>;
|
||||
|
||||
boost::geometry::index::
|
||||
rtree<PointIndexEl, boost::geometry::index::rstar<16, 4> /* ? */>
|
||||
m_ktree;
|
||||
|
||||
bool is_outside_support_cone(const Vec3f &supp, const Vec3f &pt) const
|
||||
{
|
||||
Vec3d D = (pt - supp).cast<double>();
|
||||
double dot_sq = -D.z() * std::abs(-D.z());
|
||||
|
||||
return dot_sq < D.squaredNorm() * cos2bridge_slope;
|
||||
}
|
||||
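The comparison above avoids a square root by working with signed squares: since x * |x| is strictly increasing, a * |a| < b * |b| holds exactly when a < b. Unfolding cos2bridge_slope = cos(max_slope) * |cos(max_slope)| from the constructor and dividing the test by D.squaredNorm() (assuming D is non-zero) therefore reduces it to

    -D.z() / |D| < cos(max_slope)

so the cosine of the angle between D and the downward vertical is compared directly against the configured cosine, signs included, without normalizing D.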
|
||||
template<class PC>
|
||||
static auto *get_node(PC &&pc, size_t id)
|
||||
{
|
||||
auto *ret = decltype(pc.m_bedpoints.data())(nullptr);
|
||||
|
||||
switch(pc.get_type(id)) {
|
||||
case BED: ret = &pc.m_bedpoints[id]; break;
|
||||
case MESH: ret = &pc.m_meshpoints[id - pc.MESHPTS_BEGIN]; break;
|
||||
case LEAF: ret = &pc.m_leafs [id - pc.LEAFS_BEGIN]; break;
|
||||
case JUNCTION: ret = &pc.m_junctions[id - pc.JUNCTIONS_BEGIN]; break;
|
||||
case NONE: assert(false);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
static constexpr auto Unqueued = size_t(-1);
|
||||
|
||||
struct ZCompareFn
|
||||
{
|
||||
const PointCloud *self;
|
||||
ZCompareFn(const PointCloud *s) : self{s} {}
|
||||
bool operator()(size_t node_a, size_t node_b) const
|
||||
{
|
||||
return self->get(node_a).pos.z() > self->get(node_b).pos.z();
|
||||
}
|
||||
};
|
||||
|
||||
PointCloud(const indexed_triangle_set &M,
|
||||
std::vector<Node> support_leafs,
|
||||
const Properties &props);
|
||||
|
||||
PointCloud(std::vector<Node> meshpts,
|
||||
std::vector<Node> bedpts,
|
||||
std::vector<Node> support_leafs,
|
||||
const Properties &props);
|
||||
|
||||
PtType get_type(size_t node_id) const
|
||||
{
|
||||
PtType ret = NONE;
|
||||
|
||||
if (node_id < MESHPTS_BEGIN && !m_bedpoints.empty()) ret = BED;
|
||||
else if (node_id < LEAFS_BEGIN && !m_meshpoints.empty()) ret = MESH;
|
||||
else if (node_id < JUNCTIONS_BEGIN && !m_leafs.empty()) ret = LEAF;
|
||||
else if (node_id >= JUNCTIONS_BEGIN && !m_junctions.empty()) ret = JUNCTION;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
const Node &get(size_t node_id) const
|
||||
{
|
||||
return *get_node(*this, node_id);
|
||||
}
|
||||
|
||||
Node &get(size_t node_id)
|
||||
{
|
||||
return *get_node(*this, node_id);
|
||||
}
|
||||
|
||||
const Node *find(size_t node_id) const { return get_node(*this, node_id); }
|
||||
Node *find(size_t node_id) { return get_node(*this, node_id); }
|
||||
|
||||
// Return the original index of a leaf in the input array, if the given
|
||||
// node id is indeed of type LEAF
|
||||
int get_leaf_id(size_t node_id) const
|
||||
{
|
||||
return node_id >= LEAFS_BEGIN && node_id < JUNCTIONS_BEGIN ?
|
||||
node_id - LEAFS_BEGIN :
|
||||
Node::ID_NONE;
|
||||
}
|
||||
|
||||
size_t get_queue_idx(size_t node_id) const { return m_queue_indices[node_id]; }
|
||||
|
||||
float get_distance(const Vec3f &p, size_t node) const;
|
||||
|
||||
size_t next_junction_id() const
|
||||
{
|
||||
return JUNCTIONS_BEGIN + m_junctions.size();
|
||||
}
|
||||
|
||||
size_t insert_junction(const Node &p)
|
||||
{
|
||||
size_t new_id = next_junction_id();
|
||||
m_junctions.emplace_back(p);
|
||||
m_junctions.back().id = int(new_id);
|
||||
m_ktree.insert({m_junctions.back().pos, new_id});
|
||||
m_searchable_indices.emplace_back(true);
|
||||
m_queue_indices.emplace_back(Unqueued);
|
||||
++m_reachable_cnt;
|
||||
|
||||
return new_id;
|
||||
}
|
||||
|
||||
const std::vector<Node> &get_junctions() const noexcept { return m_junctions; }
|
||||
const std::vector<Node> &get_bedpoints() const noexcept { return m_bedpoints; }
|
||||
const std::vector<Node> &get_meshpoints() const noexcept { return m_meshpoints; }
|
||||
const std::vector<Node> &get_leafs() const noexcept { return m_leafs; }
|
||||
const Properties & properties() const noexcept { return m_props; }
|
||||
|
||||
void mark_unreachable(size_t node_id)
|
||||
{
|
||||
assert(node_id < m_searchable_indices.size());
|
||||
|
||||
m_searchable_indices[node_id] = false;
|
||||
m_queue_indices[node_id] = Unqueued;
|
||||
--m_reachable_cnt;
|
||||
}
|
||||
|
||||
size_t reachable_count() const { return m_reachable_cnt; }
|
||||
|
||||
template<class Fn>
|
||||
void foreach_reachable(const Vec3f &pos,
|
||||
Fn &&visitor,
|
||||
size_t k,
|
||||
double min_dist = 0.)
|
||||
{
|
||||
// Fake output iterator
|
||||
struct Output {
|
||||
const PointCloud *self;
|
||||
Vec3f p;
|
||||
Fn &visitorfn;
|
||||
|
||||
Output& operator *() { return *this; }
|
||||
void operator=(const PointIndexEl &el) {
|
||||
visitorfn(el.second, self->get_distance(p, el.second),
|
||||
(p - el.first).squaredNorm());
|
||||
}
|
||||
Output& operator++() { return *this; }
|
||||
};
|
||||
|
||||
namespace bgi = boost::geometry::index;
|
||||
float brln = 2 * m_props.max_branch_length();
|
||||
BoundingBox3Base<Vec3f> bb{{pos.x() - brln, pos.y() - brln,
|
||||
float(m_props.ground_level() - EPSILON)},
|
||||
{pos.x() + brln, pos.y() + brln,
|
||||
m_ktree.bounds().max_corner().get<Z>()}};
|
||||
|
||||
// Extend upwards to find mergeable junctions and support points
|
||||
bb.max.z() = m_ktree.bounds().max_corner().get<Z>();
|
||||
|
||||
auto filter = bgi::satisfies(
|
||||
[this, &pos, min_dist](const PointIndexEl &e) {
|
||||
double D_branching = get_distance(pos, e.second);
|
||||
double D_euql = (pos - e.first).squaredNorm() ;
|
||||
return m_searchable_indices[e.second] &&
|
||||
!std::isinf(D_branching) && D_euql > min_dist;
|
||||
});
|
||||
|
||||
m_ktree.query(bgi::intersects(bb) && filter && bgi::nearest(pos, k),
|
||||
Output{this, pos, visitor});
|
||||
}
|
||||
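The Output helper above is a "fake output iterator": instead of storing query hits, it forwards each one to the visitor. A self-contained sketch of the same pattern against a Boost.Geometry R-tree (illustrative names, not PrusaSlicer code):

#include <boost/geometry.hpp>
#include <boost/geometry/index/rtree.hpp>
#include <cstddef>
#include <iostream>
#include <utility>

namespace bg  = boost::geometry;
namespace bgi = boost::geometry::index;

using Pt = bg::model::point<float, 3, bg::cs::cartesian>;
using El = std::pair<Pt, std::size_t>;   // point + node id, like PointIndexEl

// Every element the query would copy out is handed to a callback instead.
template<class Fn> struct VisitorOutput {
    Fn &fn;
    VisitorOutput &operator*()  { return *this; }
    VisitorOutput &operator++() { return *this; }
    void operator=(const El &el) { fn(el); }
};

int main()
{
    bgi::rtree<El, bgi::rstar<16>> tree;
    tree.insert({Pt{0.f, 0.f, 0.f}, 0});
    tree.insert({Pt{1.f, 0.f, 0.f}, 1});
    tree.insert({Pt{5.f, 5.f, 5.f}, 2});

    auto visit = [](const El &el) { std::cout << "hit id " << el.second << "\n"; };
    VisitorOutput<decltype(visit)> out{visit};

    // Combine a filter with a k-nearest predicate, the way foreach_reachable does.
    tree.query(bgi::satisfies([](const El &el) { return el.second != 1; })
                   && bgi::nearest(Pt{0.f, 0.f, 0.f}, 2),
               out);
    return 0;
}

The same trick lets the real code stream candidates straight into the visitor without allocating a result vector per query.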
|
||||
auto start_queue()
|
||||
{
|
||||
auto ptsqueue = make_mutable_priority_queue<size_t, true>(
|
||||
[this](size_t el, size_t idx) { m_queue_indices[el] = idx; },
|
||||
ZCompareFn{this});
|
||||
|
||||
ptsqueue.reserve(m_leafs.size());
|
||||
size_t iend = LEAFS_BEGIN + m_leafs.size();
|
||||
for (size_t i = LEAFS_BEGIN; i < iend; ++i)
|
||||
ptsqueue.push(i);
|
||||
|
||||
return ptsqueue;
|
||||
}
|
||||
};
|
||||
|
||||
template<class PC, class Fn> void traverse(PC &&pc, size_t root, Fn &&fn)
|
||||
{
|
||||
if (auto nodeptr = pc.find(root); nodeptr != nullptr) {
|
||||
auto &nroot = *nodeptr;
|
||||
fn(nroot);
|
||||
if (nroot.left >= 0) traverse(pc, nroot.left, fn);
|
||||
if (nroot.right >= 0) traverse(pc, nroot.right, fn);
|
||||
}
|
||||
}
|
||||
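traverse() walks the tree by node ids rather than pointers, with negative ids meaning "no child". A standalone sketch of the same idea (not the PrusaSlicer types):

#include <cstddef>
#include <cstdio>
#include <vector>

struct Node { int left = -1, right = -1; float weight = 0.f; };

// Pre-order traversal over an index-based binary tree; -1 marks a missing child.
template<class Fn> void traverse(const std::vector<Node> &nodes, int root, Fn &&fn)
{
    if (root < 0 || static_cast<std::size_t>(root) >= nodes.size()) return;
    const Node &n = nodes[root];
    fn(n);
    traverse(nodes, n.left,  fn);
    traverse(nodes, n.right, fn);
}

int main()
{
    std::vector<Node> nodes{{1, 2, 1.f}, {-1, -1, 2.f}, {-1, -1, 3.f}};
    float total = 0.f;
    traverse(nodes, 0, [&](const Node &n) { total += n.weight; });
    std::printf("total weight: %g\n", total); // prints: total weight: 6
    return 0;
}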
|
||||
void build_tree(PointCloud &pcloud, Builder &builder);
|
||||
|
||||
}} // namespace Slic3r::branchingtree
|
||||
|
||||
#endif // POINTCLOUD_HPP
|
@ -109,6 +109,8 @@ set(SLIC3R_SOURCES
|
||||
Format/SL1_SVG.cpp
|
||||
Format/pwmx.hpp
|
||||
Format/pwmx.cpp
|
||||
Format/STEP.hpp
|
||||
Format/STEP.cpp
|
||||
GCode/ThumbnailData.cpp
|
||||
GCode/ThumbnailData.hpp
|
||||
GCode/Thumbnails.cpp
|
||||
@ -319,6 +321,12 @@ set(SLIC3R_SOURCES
|
||||
SLA/ReprojectPointsOnMesh.hpp
|
||||
SLA/DefaultSupportTree.hpp
|
||||
SLA/DefaultSupportTree.cpp
|
||||
SLA/BranchingTreeSLA.hpp
|
||||
SLA/BranchingTreeSLA.cpp
|
||||
BranchingTree/BranchingTree.cpp
|
||||
BranchingTree/BranchingTree.hpp
|
||||
BranchingTree/PointCloud.cpp
|
||||
BranchingTree/PointCloud.hpp
|
||||
|
||||
Arachne/BeadingStrategy/BeadingStrategy.hpp
|
||||
Arachne/BeadingStrategy/BeadingStrategy.cpp
|
||||
@ -381,7 +389,7 @@ find_package(CGAL REQUIRED)
|
||||
cmake_policy(POP)
|
||||
|
||||
add_library(libslic3r_cgal STATIC MeshBoolean.cpp MeshBoolean.hpp TryCatchSignal.hpp
|
||||
TryCatchSignal.cpp)
|
||||
TryCatchSignal.cpp Geometry/VoronoiUtilsCgal.hpp Geometry/VoronoiUtilsCgal.cpp)
|
||||
target_include_directories(libslic3r_cgal PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
|
||||
|
||||
# Reset compile options of libslic3r_cgal. Despite it being linked privately, CGAL options
|
||||
@ -436,6 +444,11 @@ target_link_libraries(libslic3r
|
||||
qoi
|
||||
)
|
||||
|
||||
if (APPLE)
|
||||
# TODO: we need to fix notarization with the separate shared library
|
||||
target_link_libraries(libslic3r OCCTWrapper)
|
||||
endif ()
|
||||
|
||||
if (TARGET OpenVDB::openvdb)
|
||||
target_link_libraries(libslic3r OpenVDB::openvdb)
|
||||
endif()
|
||||
|
@ -705,6 +705,8 @@ Slic3r::Polylines diff_pl(const Slic3r::Polylines &subject, const Slic3r::Polygo
|
||||
{ return _clipper_pl_open(ClipperLib::ctDifference, ClipperUtils::PolylinesProvider(subject), ClipperUtils::PolygonsProvider(clip)); }
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polyline &subject, const Slic3r::ExPolygon &clip)
|
||||
{ return _clipper_pl_open(ClipperLib::ctDifference, ClipperUtils::SinglePathProvider(subject.points), ClipperUtils::ExPolygonProvider(clip)); }
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polyline &subject, const Slic3r::ExPolygons &clip)
|
||||
{ return _clipper_pl_open(ClipperLib::ctDifference, ClipperUtils::SinglePathProvider(subject.points), ClipperUtils::ExPolygonsProvider(clip)); }
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polylines &subject, const Slic3r::ExPolygon &clip)
|
||||
{ return _clipper_pl_open(ClipperLib::ctDifference, ClipperUtils::PolylinesProvider(subject), ClipperUtils::ExPolygonProvider(clip)); }
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polylines &subject, const Slic3r::ExPolygons &clip)
|
||||
|
@ -408,6 +408,7 @@ Slic3r::ExPolygons diff_ex(const Slic3r::Surfaces &subject, const Slic3r::Surfac
|
||||
Slic3r::ExPolygons diff_ex(const Slic3r::SurfacesPtr &subject, const Slic3r::Polygons &clip, ApplySafetyOffset do_safety_offset = ApplySafetyOffset::No);
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polylines &subject, const Slic3r::Polygons &clip);
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polyline &subject, const Slic3r::ExPolygon &clip);
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polyline &subject, const Slic3r::ExPolygons &clip);
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polylines &subject, const Slic3r::ExPolygon &clip);
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polylines &subject, const Slic3r::ExPolygons &clip);
|
||||
Slic3r::Polylines diff_pl(const Slic3r::Polygons &subject, const Slic3r::Polygons &clip);
|
||||
|
@ -389,6 +389,7 @@ void Layer::make_fills(FillAdaptive::Octree* adaptive_fill_octree, FillAdaptive:
|
||||
params.anchor_length_max = surface_fill.params.anchor_length_max;
|
||||
params.resolution = resolution;
|
||||
params.use_arachne = perimeter_generator == PerimeterGeneratorType::Arachne && surface_fill.params.pattern == ipConcentric;
|
||||
params.layer_height = m_regions[surface_fill.region_id]->layer()->height;
|
||||
|
||||
for (ExPolygon &expoly : surface_fill.expolygons) {
|
||||
// Spacing is modified by the filler to indicate adjustments. Reset it for each expolygon.
|
||||
|
@ -61,6 +61,8 @@ struct FillParams
|
||||
|
||||
// For Concentric infill, to switch between Classic and Arachne.
|
||||
bool use_arachne { false };
|
||||
// Layer height for Concentric infill with Arachne.
|
||||
coordf_t layer_height { 0.f };
|
||||
};
|
||||
static_assert(IsTriviallyCopyable<FillParams>::value, "FillParams class is not POD (and it should be - see constructor).");
|
||||
|
||||
|
@ -77,8 +77,8 @@ void FillConcentric::_fill_surface_single(const FillParams ¶ms,
|
||||
|
||||
if (params.density > 0.9999f && !params.dont_adjust) {
|
||||
coord_t loops_count = std::max(bbox_size.x(), bbox_size.y()) / min_spacing + 1;
|
||||
Polygons polygons = offset(expolygon, min_spacing / 2);
|
||||
Arachne::WallToolPaths wallToolPaths(polygons, min_spacing, min_spacing, loops_count, 0, *this->print_object_config, *this->print_config);
|
||||
Polygons polygons = offset(expolygon, float(min_spacing) / 2.f);
|
||||
Arachne::WallToolPaths wallToolPaths(polygons, min_spacing, min_spacing, loops_count, 0, params.layer_height, *this->print_object_config, *this->print_config);
|
||||
|
||||
std::vector<Arachne::VariableWidthLines> loops = wallToolPaths.getToolPaths();
|
||||
std::vector<const Arachne::ExtrusionLine *> all_extrusions;
|
||||
|
@ -70,7 +70,7 @@ void Generator::generateInitialInternalOverhangs(const PrintObject &print_object
|
||||
// Remove the part of the infill area that is already supported by the walls.
|
||||
Polygons overhang = diff(offset(infill_area_here, -float(m_wall_supporting_radius)), infill_area_above);
|
||||
// Filter out unprintable polygons and near degenerated polygons (three almost collinear points and so).
|
||||
overhang = opening(overhang, SCALED_EPSILON, SCALED_EPSILON);
|
||||
overhang = opening(overhang, float(SCALED_EPSILON), float(SCALED_EPSILON));
|
||||
|
||||
m_overhang_per_layer[layer_nr] = overhang;
|
||||
infill_area_above = std::move(infill_area_here);
|
||||
|
@ -457,6 +457,7 @@ namespace Slic3r {
|
||||
|
||||
bool load_model_from_file(const std::string& filename, Model& model, DynamicPrintConfig& config, ConfigSubstitutionContext& config_substitutions, bool check_version);
|
||||
unsigned int version() const { return m_version; }
|
||||
boost::optional<Semver> prusaslicer_generator_version() const { return m_prusaslicer_generator_version; }
|
||||
|
||||
private:
|
||||
void _destroy_xml_parser();
|
||||
@ -3147,8 +3148,7 @@ bool _3MF_Exporter::_add_custom_gcode_per_print_z_file_to_archive( mz_zip_archiv
|
||||
}
|
||||
|
||||
// Perform conversions based on the config values available.
|
||||
//FIXME provide a version of PrusaSlicer that stored the project file (3MF).
|
||||
static void handle_legacy_project_loaded(unsigned int version_project_file, DynamicPrintConfig& config)
|
||||
static void handle_legacy_project_loaded(unsigned int version_project_file, DynamicPrintConfig& config, const boost::optional<Semver>& prusaslicer_generator_version)
|
||||
{
|
||||
if (! config.has("brim_separation")) {
|
||||
if (auto *opt_elephant_foot = config.option<ConfigOptionFloat>("elefant_foot_compensation", false); opt_elephant_foot) {
|
||||
@ -3157,6 +3157,23 @@ static void handle_legacy_project_loaded(unsigned int version_project_file, Dyna
|
||||
opt_brim_separation->value = opt_elephant_foot->value;
|
||||
}
|
||||
}
|
||||
|
||||
// In PrusaSlicer 2.5.0-alpha2 and 2.5.0-alpha3, we introduced several parameters for Arachne that depend
|
||||
// on nozzle size. Later we decided to make the default values for those parameters computed automatically
|
||||
// until the user changes them.
|
||||
if (prusaslicer_generator_version && *prusaslicer_generator_version >= *Semver::parse("2.5.0-alpha2") && *prusaslicer_generator_version <= *Semver::parse("2.5.0-alpha3")) {
|
||||
if (auto *opt_wall_transition_length = config.option<ConfigOptionFloatOrPercent>("wall_transition_length", false);
|
||||
opt_wall_transition_length && !opt_wall_transition_length->percent && opt_wall_transition_length->value == 0.4) {
|
||||
opt_wall_transition_length->percent = true;
|
||||
opt_wall_transition_length->value = 100;
|
||||
}
|
||||
|
||||
if (auto *opt_min_feature_size = config.option<ConfigOptionFloatOrPercent>("min_feature_size", false);
|
||||
opt_min_feature_size && !opt_min_feature_size->percent && opt_min_feature_size->value == 0.1) {
|
||||
opt_min_feature_size->percent = true;
|
||||
opt_min_feature_size->value = 25;
|
||||
}
|
||||
}
|
||||
}
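The two conversions above amount to re-expressing the old absolute defaults as percentages. Assuming the percentage is taken relative to the default 0.4 mm nozzle (the comment only says the parameters depend on nozzle size), the arithmetic is:

\[
0.4\ \mathrm{mm} = 100\% \times 0.4\ \mathrm{mm}, \qquad
0.1\ \mathrm{mm} = 25\% \times 0.4\ \mathrm{mm},
\]

so projects saved by those two alpha builds keep their effective values while switching to the new nozzle-relative defaults.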
|
||||
|
||||
bool is_project_3mf(const std::string& filename)
|
||||
@ -3199,7 +3216,7 @@ bool load_3mf(const char* path, DynamicPrintConfig& config, ConfigSubstitutionCo
|
||||
_3MF_Importer importer;
|
||||
importer.load_model_from_file(path, *model, config, config_substitutions, check_version);
|
||||
importer.log_errors();
|
||||
handle_legacy_project_loaded(importer.version(), config);
|
||||
handle_legacy_project_loaded(importer.version(), config, importer.prusaslicer_generator_version());
|
||||
|
||||
return !model->objects.empty() || !config.empty();
|
||||
}
|
||||
|
131
src/libslic3r/Format/STEP.cpp
Normal file
@ -0,0 +1,131 @@
|
||||
#include "STEP.hpp"
|
||||
#include "occt_wrapper/OCCTWrapper.hpp"
|
||||
|
||||
#include "libslic3r/Model.hpp"
|
||||
#include "libslic3r/TriangleMesh.hpp"
|
||||
#include "libslic3r/Utils.hpp"
|
||||
|
||||
#include <boost/filesystem.hpp>
|
||||
#include <boost/dll/runtime_symbol_info.hpp>
|
||||
#include <boost/log/trivial.hpp>
|
||||
|
||||
#include <string>
|
||||
#include <functional>
|
||||
|
||||
#ifdef _WIN32
|
||||
#include<windows.h>
|
||||
#else
|
||||
#include<occt_wrapper/OCCTWrapper.hpp>
|
||||
#include <dlfcn.h>
|
||||
#endif
|
||||
|
||||
|
||||
namespace Slic3r {
|
||||
|
||||
#if __APPLE__
|
||||
extern "C" bool load_step_internal(const char *path, OCCTResult* res);
|
||||
#endif
|
||||
|
||||
LoadStepFn get_load_step_fn()
|
||||
{
|
||||
static LoadStepFn load_step_fn = nullptr;
|
||||
|
||||
#ifndef __APPLE__
|
||||
constexpr const char* fn_name = "load_step_internal";
|
||||
#endif
|
||||
|
||||
if (!load_step_fn) {
|
||||
auto libpath = boost::dll::program_location().parent_path();
|
||||
#ifdef _WIN32
|
||||
libpath /= "OCCTWrapper.dll";
|
||||
HMODULE module = LoadLibraryW(libpath.wstring().c_str());
|
||||
if (module == NULL)
|
||||
throw Slic3r::RuntimeError("Cannot load OCCTWrapper.dll");
|
||||
|
||||
try {
|
||||
FARPROC farproc = GetProcAddress(module, fn_name);
|
||||
if (! farproc) {
|
||||
DWORD ec = GetLastError();
|
||||
throw Slic3r::RuntimeError(std::string("Cannot load function from OCCTWrapper.dll: ") + fn_name
|
||||
+ "\n\nError code: " + std::to_string(ec));
|
||||
}
|
||||
load_step_fn = reinterpret_cast<LoadStepFn>(farproc);
|
||||
} catch (const Slic3r::RuntimeError&) {
|
||||
FreeLibrary(module);
|
||||
throw;
|
||||
}
|
||||
#elif __APPLE__
|
||||
load_step_fn = &load_step_internal;
|
||||
#else
|
||||
libpath /= "OCCTWrapper.so";
|
||||
void *plugin_ptr = dlopen(libpath.c_str(), RTLD_NOW | RTLD_GLOBAL);
|
||||
|
||||
if (plugin_ptr) {
|
||||
load_step_fn = reinterpret_cast<LoadStepFn>(dlsym(plugin_ptr, fn_name));
|
||||
if (!load_step_fn) {
|
||||
dlclose(plugin_ptr);
|
||||
throw Slic3r::RuntimeError(std::string("Cannot load function from OCCTWrapper.dll: ") + fn_name
|
||||
+ "\n\n" + dlerror());
|
||||
}
|
||||
} else {
|
||||
throw Slic3r::RuntimeError(std::string("Cannot load OCCTWrapper.dll:\n\n") + dlerror());
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
return load_step_fn;
|
||||
}
|
||||
|
||||
bool load_step(const char *path, Model *model /*BBS:, ImportStepProgressFn proFn*/)
|
||||
{
|
||||
OCCTResult occt_object;
|
||||
|
||||
LoadStepFn load_step_fn = get_load_step_fn();
|
||||
|
||||
if (!load_step_fn)
|
||||
return false;
|
||||
|
||||
load_step_fn(path, &occt_object);
|
||||
|
||||
assert(! occt_object.volumes.empty());
|
||||
|
||||
assert(boost::algorithm::iends_with(occt_object.object_name, ".stp")
|
||||
|| boost::algorithm::iends_with(occt_object.object_name, ".step"));
|
||||
occt_object.object_name.erase(occt_object.object_name.rfind('.'));
|
||||
assert(! occt_object.object_name.empty());
|
||||
|
||||
|
||||
ModelObject* new_object = model->add_object();
|
||||
new_object->input_file = path;
|
||||
if (occt_object.volumes.size() == 1 && ! occt_object.volumes.front().volume_name.empty())
|
||||
new_object->name = occt_object.volumes.front().volume_name;
|
||||
else
|
||||
new_object->name = occt_object.object_name;
|
||||
|
||||
|
||||
for (size_t i=0; i<occt_object.volumes.size(); ++i) {
|
||||
indexed_triangle_set its;
|
||||
for (size_t j=0; j<occt_object.volumes[i].vertices.size(); ++j)
|
||||
its.vertices.emplace_back(Vec3f(occt_object.volumes[i].vertices[j][0],
|
||||
occt_object.volumes[i].vertices[j][1],
|
||||
occt_object.volumes[i].vertices[j][2]));
|
||||
for (size_t j=0; j<occt_object.volumes[i].indices.size(); ++j)
|
||||
its.indices.emplace_back(Vec3i(occt_object.volumes[i].indices[j][0],
|
||||
occt_object.volumes[i].indices[j][1],
|
||||
occt_object.volumes[i].indices[j][2]));
|
||||
its_merge_vertices(its, true);
|
||||
TriangleMesh triangle_mesh(std::move(its));
|
||||
ModelVolume* new_volume = new_object->add_volume(std::move(triangle_mesh));
|
||||
|
||||
new_volume->name = occt_object.volumes[i].volume_name.empty()
|
||||
? std::string("Part") + std::to_string(i+1)
|
||||
: occt_object.volumes[i].volume_name;
|
||||
new_volume->source.input_file = path;
|
||||
new_volume->source.object_idx = (int)model->objects.size() - 1;
|
||||
new_volume->source.volume_idx = (int)new_object->volumes.size() - 1;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
}; // namespace Slic3r
|
19
src/libslic3r/Format/STEP.hpp
Normal file
@ -0,0 +1,19 @@
|
||||
// Original implementation of STEP format import created by Bambulab.
|
||||
// https://github.com/bambulab/BambuStudio
|
||||
// Forked off commit 1555904, modified by Prusa Research.
|
||||
|
||||
#ifndef slic3r_Format_STEP_hpp_
|
||||
#define slic3r_Format_STEP_hpp_
|
||||
|
||||
namespace Slic3r {
|
||||
|
||||
class Model;
|
||||
|
||||
//typedef std::function<void(int load_stage, int current, int total, bool& cancel)> ImportStepProgressFn;
|
||||
|
||||
// Load a step file into a provided model.
|
||||
extern bool load_step(const char *path_str, Model *model /*LMBBS:, ImportStepProgressFn proFn = nullptr*/);
|
||||
|
||||
}; // namespace Slic3r
|
||||
|
||||
#endif /* slic3r_Format_STEP_hpp_ */
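For context, this is roughly how the new importer is driven from a caller's point of view; a hedged fragment (the call site and file name are illustrative, not part of this diff):

#include "libslic3r/Model.hpp"
#include "libslic3r/Format/STEP.hpp"

// Illustrative only: import a STEP file into a fresh Model.
bool import_step_example()
{
    Slic3r::Model model;
    if (! Slic3r::load_step("example_part.step", &model))
        return false;
    // One ModelObject was added, holding one ModelVolume per solid in the file;
    // the OCCTWrapper shared library is loaded lazily on the first call.
    return ! model.objects.empty();
}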
|
@ -3137,7 +3137,7 @@ bool GCode::needs_retraction(const Polyline &travel, ExtrusionRole role)
|
||||
if (role == erSupportMaterial) {
|
||||
const SupportLayer* support_layer = dynamic_cast<const SupportLayer*>(m_layer);
|
||||
//FIXME support_layer->support_islands.contains should use some search structure!
|
||||
if (support_layer != NULL && ! intersection_pl(travel, support_layer->support_islands).empty())
|
||||
if (support_layer != NULL && diff_pl(travel, support_layer->support_islands).empty())
|
||||
// skip retraction if this is a travel move inside a support material island
|
||||
//FIXME not retracting over a long path may cause oozing, which in turn may result in missing material
|
||||
// at the end of the extrusion path!
|
||||
|
@ -798,7 +798,8 @@ const std::vector<std::pair<GCodeProcessor::EProducer, std::string>> GCodeProces
|
||||
{ EProducer::Simplify3D, "G-Code generated by Simplify3D(R)" },
|
||||
{ EProducer::CraftWare, "CraftWare" },
|
||||
{ EProducer::ideaMaker, "ideaMaker" },
|
||||
{ EProducer::KissSlicer, "KISSlicer" }
|
||||
{ EProducer::KissSlicer, "KISSlicer" },
|
||||
{ EProducer::BambuStudio, "BambuStudio" }
|
||||
};
|
||||
|
||||
unsigned int GCodeProcessor::s_result_id = 0;
|
||||
@ -2054,6 +2055,7 @@ bool GCodeProcessor::process_producers_tags(const std::string_view comment)
|
||||
case EProducer::CraftWare: { return process_craftware_tags(comment); }
|
||||
case EProducer::ideaMaker: { return process_ideamaker_tags(comment); }
|
||||
case EProducer::KissSlicer: { return process_kissslicer_tags(comment); }
|
||||
case EProducer::BambuStudio: { return process_bambustudio_tags(comment); }
|
||||
default: { return false; }
|
||||
}
|
||||
}
|
||||
@ -2498,6 +2500,62 @@ bool GCodeProcessor::process_kissslicer_tags(const std::string_view comment)
|
||||
return false;
|
||||
}
|
||||
|
||||
bool GCodeProcessor::process_bambustudio_tags(const std::string_view comment)
|
||||
{
|
||||
// extrusion roles
|
||||
|
||||
std::string tag = "FEATURE: ";
|
||||
size_t pos = comment.find(tag);
|
||||
if (pos != comment.npos) {
|
||||
const std::string_view type = comment.substr(pos + tag.length());
|
||||
if (type == "Custom")
|
||||
set_extrusion_role(erCustom);
|
||||
else if (type == "Inner wall")
|
||||
set_extrusion_role(erPerimeter);
|
||||
else if (type == "Outer wall")
|
||||
set_extrusion_role(erExternalPerimeter);
|
||||
else if (type == "Overhang wall")
|
||||
set_extrusion_role(erOverhangPerimeter);
|
||||
else if (type == "Gap infill")
|
||||
set_extrusion_role(erGapFill);
|
||||
else if (type == "Bridge")
|
||||
set_extrusion_role(erBridgeInfill);
|
||||
else if (type == "Sparse infill")
|
||||
set_extrusion_role(erInternalInfill);
|
||||
else if (type == "Internal solid infill")
|
||||
set_extrusion_role(erSolidInfill);
|
||||
else if (type == "Top surface")
|
||||
set_extrusion_role(erTopSolidInfill);
|
||||
else if (type == "Bottom surface")
|
||||
set_extrusion_role(erNone);
|
||||
else if (type == "Ironing")
|
||||
set_extrusion_role(erIroning);
|
||||
else if (type == "Skirt")
|
||||
set_extrusion_role(erSkirt);
|
||||
else if (type == "Brim")
|
||||
set_extrusion_role(erSkirt);
|
||||
else if (type == "Support")
|
||||
set_extrusion_role(erSupportMaterial);
|
||||
else if (type == "Support interface")
|
||||
set_extrusion_role(erSupportMaterialInterface);
|
||||
else if (type == "Support transition")
|
||||
set_extrusion_role(erNone);
|
||||
else if (type == "Prime tower")
|
||||
set_extrusion_role(erWipeTower);
|
||||
else {
|
||||
set_extrusion_role(erNone);
|
||||
BOOST_LOG_TRIVIAL(warning) << "GCodeProcessor found unknown extrusion role: " << type;
|
||||
}
|
||||
|
||||
if (m_extrusion_role == erExternalPerimeter)
|
||||
m_seams_detector.activate(true);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
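The chain of string comparisons above maps BambuStudio's "; FEATURE: <name>" comments onto PrusaSlicer extrusion roles. A standalone sketch of the same mapping written as a lookup table (an alternative formulation for illustration, not the code used here; only a few of the recognized names are shown):

#include <cstddef>
#include <iostream>
#include <string_view>
#include <utility>
#include <vector>

int main()
{
    // A few of the FEATURE values recognized above, paired with the role they map to.
    static const std::vector<std::pair<std::string_view, std::string_view>> features = {
        {"Outer wall",    "erExternalPerimeter"},
        {"Inner wall",    "erPerimeter"},
        {"Sparse infill", "erInternalInfill"},
        {"Support",       "erSupportMaterial"},
    };

    std::string_view comment = "FEATURE: Outer wall";   // producer tag, "; " already stripped
    constexpr std::string_view tag = "FEATURE: ";

    if (const std::size_t pos = comment.find(tag); pos != comment.npos) {
        const std::string_view type = comment.substr(pos + tag.size());
        for (const auto &[name, role] : features)
            if (type == name)
                std::cout << "mapped to " << role << '\n';   // prints: mapped to erExternalPerimeter
    }
    return 0;
}

Unrecognized names fall back to erNone with a warning, exactly as the else branch above does.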
|
||||
bool GCodeProcessor::detect_producer(const std::string_view comment)
|
||||
{
|
||||
for (const auto& [id, search_string] : Producers) {
|
||||
@ -2873,7 +2931,7 @@ void GCodeProcessor::process_G1(const GCodeReader::GCodeLine& line)
|
||||
#if ENABLE_PROCESS_G2_G3_LINES
|
||||
void GCodeProcessor::process_G2_G3(const GCodeReader::GCodeLine& line, bool clockwise)
|
||||
{
|
||||
if (!line.has('X') || !line.has('Y') || !line.has('I') || !line.has('J'))
|
||||
if (!line.has('I') || !line.has('J'))
|
||||
return;
|
||||
|
||||
// relative center
|
||||
@ -2904,7 +2962,7 @@ void GCodeProcessor::process_G2_G3(const GCodeReader::GCodeLine& line, bool cloc
|
||||
Vec3d relative_start() const { return start - center; }
|
||||
Vec3d relative_end() const { return end - center; }
|
||||
|
||||
bool closed() const { return end.isApprox(start); }
|
||||
bool is_full_circle() const { return std::abs(delta_x()) < EPSILON && std::abs(delta_y()) < EPSILON; }
|
||||
};
|
||||
|
||||
Arc arc;
|
||||
@ -2947,7 +3005,7 @@ void GCodeProcessor::process_G2_G3(const GCodeReader::GCodeLine& line, bool cloc
|
||||
const Vec3d rel_arc_end = arc.relative_end();
|
||||
|
||||
// arc angle
|
||||
if (arc.closed())
|
||||
if (arc.is_full_circle())
|
||||
arc.angle = 2.0 * PI;
|
||||
else {
|
||||
arc.angle = std::atan2(rel_arc_start.x() * rel_arc_end.y() - rel_arc_start.y() * rel_arc_end.x(),
|
||||
@ -2999,24 +3057,23 @@ void GCodeProcessor::process_G2_G3(const GCodeReader::GCodeLine& line, bool cloc
|
||||
// calculate arc segments
|
||||
// reference:
|
||||
// Prusa-Firmware\Firmware\motion_control.cpp - mc_arc()
|
||||
// https://github.com/prusa3d/Prusa-Firmware/blob/MK3/Firmware/motion_control.cpp
|
||||
|
||||
// segments count
|
||||
static const double MM_PER_ARC_SEGMENT = 1.0;
|
||||
const size_t segments = std::max<size_t>(std::floor(travel_length / MM_PER_ARC_SEGMENT), 1);
|
||||
static const double MM_PER_ARC_SEGMENT = 0.5;
|
||||
const size_t segments = std::ceil(travel_length / MM_PER_ARC_SEGMENT);
|
||||
assert(segments >= 1);
|
||||
|
||||
const double theta_per_segment = arc.angle / double(segments);
|
||||
const double z_per_segment = arc.delta_z() / double(segments);
|
||||
const double extruder_per_segment = (extrusion.has_value()) ? *extrusion / double(segments) : 0.0;
|
||||
|
||||
double cos_T = 1.0 - 0.5 * sqr(theta_per_segment); // Small angle approximation
|
||||
double sin_T = theta_per_segment;
|
||||
const double sq_theta_per_segment = sqr(theta_per_segment);
|
||||
const double cos_T = 1.0 - 0.5 * sq_theta_per_segment; // Small angle approximation
|
||||
const double sin_T = theta_per_segment - sq_theta_per_segment * theta_per_segment / 6.0; // Small angle approximation
|
||||
|
||||
AxisCoords prev_target = m_start_position;
|
||||
AxisCoords arc_target;
|
||||
double sin_Ti;
|
||||
double cos_Ti;
|
||||
double r_axisi;
|
||||
size_t count = 0;
|
||||
|
||||
// Initialize the linear axis
|
||||
arc_target[Z] = m_start_position[Z];
|
||||
@ -3030,22 +3087,23 @@ void GCodeProcessor::process_G2_G3(const GCodeReader::GCodeLine& line, bool cloc
|
||||
|
||||
std::string gcode;
|
||||
|
||||
for (size_t i = 1; i < segments; ++i) { // Increment (segments-1)
|
||||
if (count < N_ARC_CORRECTION) {
|
||||
// Apply vector rotation matrix
|
||||
r_axisi = curr_rel_arc_start.x() * sin_T + curr_rel_arc_start.y() * cos_T;
|
||||
curr_rel_arc_start.x() = curr_rel_arc_start.x() * cos_T - curr_rel_arc_start.y() * sin_T;
|
||||
curr_rel_arc_start.y() = r_axisi;
|
||||
count++;
|
||||
}
|
||||
else {
|
||||
// Arc correction to radius vector. Computed only every N_ARC_CORRECTION increments.
|
||||
// Compute exact location by applying transformation matrix from initial radius vector(=-offset).
|
||||
cos_Ti = ::cos(double(i) * theta_per_segment);
|
||||
sin_Ti = ::sin(double(i) * theta_per_segment);
|
||||
size_t n_arc_correction = N_ARC_CORRECTION;
|
||||
|
||||
for (size_t i = 1; i < segments; ++i) {
|
||||
if (n_arc_correction-- == 0) {
|
||||
// Calculate the actual position for r_axis_x and r_axis_y
|
||||
const double cos_Ti = ::cos((double)i * theta_per_segment);
|
||||
const double sin_Ti = ::sin((double)i * theta_per_segment);
|
||||
curr_rel_arc_start.x() = -double(rel_center.x()) * cos_Ti + double(rel_center.y()) * sin_Ti;
|
||||
curr_rel_arc_start.y() = -double(rel_center.x()) * sin_Ti - double(rel_center.y()) * cos_Ti;
|
||||
count = 0;
|
||||
// reset n_arc_correction
|
||||
n_arc_correction = N_ARC_CORRECTION;
|
||||
}
|
||||
else {
|
||||
// Calculate X and Y using the small angle approximation
|
||||
const double r_axisi = curr_rel_arc_start.x() * sin_T + curr_rel_arc_start.y() * cos_T;
|
||||
curr_rel_arc_start.x() = curr_rel_arc_start.x() * cos_T - curr_rel_arc_start.y() * sin_T;
|
||||
curr_rel_arc_start.y() = r_axisi;
|
||||
}
|
||||
|
||||
// Update arc_target location
|
||||
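The segment loop above is the classic incremental arc interpolation from the referenced firmware: each step rotates the radius vector by the per-segment angle using truncated series, and periodically re-seeds it from the exact center so the error cannot accumulate. In formulas (with θ = theta_per_segment and (c_x, c_y) = rel_center):

\[
\begin{pmatrix} x' \\ y' \end{pmatrix}
=
\begin{pmatrix} \cos\theta & -\sin\theta \\ \sin\theta & \cos\theta \end{pmatrix}
\begin{pmatrix} x \\ y \end{pmatrix},
\qquad
\cos\theta \approx 1 - \tfrac{\theta^2}{2},
\quad
\sin\theta \approx \theta - \tfrac{\theta^3}{6},
\]

and every N_ARC_CORRECTION steps the radius vector is recomputed exactly from the center:

\[
x_i = -c_x\cos(i\theta) + c_y\sin(i\theta), \qquad
y_i = -c_x\sin(i\theta) - c_y\cos(i\theta).
\]

The truncation errors are \(O(\theta^4)\) and \(O(\theta^5)\) per step, which together with the finer 0.5 mm segment length keeps the approximated polyline close to the true arc between corrections.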
@ -3721,8 +3779,13 @@ void GCodeProcessor::post_process()
|
||||
}
|
||||
|
||||
auto process_used_filament = [&](std::string& gcode_line) {
|
||||
auto process_tag = [](std::string& gcode_line, const std::string& tag, const std::vector<double>& values) {
|
||||
if (boost::algorithm::istarts_with(gcode_line, tag)) {
|
||||
// Prefilter for parsing speed.
|
||||
if (gcode_line.size() < 8 || gcode_line[0] != ';' || gcode_line[1] != ' ')
|
||||
return false;
|
||||
if (const char c = gcode_line[2]; c != 'f' && c != 't')
|
||||
return false;
|
||||
auto process_tag = [](std::string& gcode_line, const std::string_view tag, const std::vector<double>& values) {
|
||||
if (boost::algorithm::starts_with(gcode_line, tag)) {
|
||||
gcode_line = tag;
|
||||
char buf[1024];
|
||||
for (size_t i = 0; i < values.size(); ++i) {
|
||||
|
@ -590,7 +590,8 @@ namespace Slic3r {
|
||||
Simplify3D,
|
||||
CraftWare,
|
||||
ideaMaker,
|
||||
KissSlicer
|
||||
KissSlicer,
|
||||
BambuStudio
|
||||
};
|
||||
|
||||
static const std::vector<std::pair<GCodeProcessor::EProducer, std::string>> Producers;
|
||||
@ -658,6 +659,7 @@ namespace Slic3r {
|
||||
bool process_craftware_tags(const std::string_view comment);
|
||||
bool process_ideamaker_tags(const std::string_view comment);
|
||||
bool process_kissslicer_tags(const std::string_view comment);
|
||||
bool process_bambustudio_tags(const std::string_view comment);
|
||||
|
||||
bool detect_producer(const std::string_view comment);
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
#include <memory.h>
|
||||
#include <string.h>
|
||||
#include <float.h>
|
||||
#include <cstring>
|
||||
#include <cfloat>
|
||||
|
||||
#include "../libslic3r.h"
|
||||
#include "../PrintConfig.hpp"
|
||||
@ -31,7 +31,8 @@ PressureEqualizer::PressureEqualizer(const Slic3r::GCodeConfig &config) : m_use_
|
||||
{
|
||||
// Preallocate some data, so that output_buffer.data() will return an empty string.
|
||||
output_buffer.assign(32, 0);
|
||||
output_buffer_length = 0;
|
||||
output_buffer_length = 0;
|
||||
output_buffer_prev_length = 0;
|
||||
|
||||
m_current_extruder = 0;
|
||||
// Zero the position of the XYZE axes + the current feed
|
||||
@ -58,13 +59,14 @@ PressureEqualizer::PressureEqualizer(const Slic3r::GCodeConfig &config) : m_use_
|
||||
extrusion_rate_slope.positive = m_max_volumetric_extrusion_rate_slope_positive;
|
||||
}
|
||||
|
||||
// Don't regulate the pressure in infill, gap fill and ironing.
|
||||
// TODO: Do we want to regulate pressure in erWipeTower, erCustom and erMixed?
|
||||
for (const ExtrusionRole er : {erBridgeInfill, erGapFill, erIroning}) {
|
||||
// Don't regulate the pressure before and after gap-fill and ironing.
|
||||
for (const ExtrusionRole er : {erGapFill, erIroning}) {
|
||||
m_max_volumetric_extrusion_rate_slopes[er].negative = 0;
|
||||
m_max_volumetric_extrusion_rate_slopes[er].positive = 0;
|
||||
}
|
||||
|
||||
opened_extrude_set_speed_block = false;
|
||||
|
||||
#ifdef PRESSURE_EQUALIZER_STATISTIC
|
||||
m_stat.reset();
|
||||
#endif
|
||||
@ -93,6 +95,7 @@ void PressureEqualizer::process_layer(const std::string &gcode)
|
||||
if (*gcode_begin == '\n')
|
||||
++gcode_begin;
|
||||
}
|
||||
assert(!this->opened_extrude_set_speed_block);
|
||||
}
|
||||
}
|
||||
|
||||
@ -114,9 +117,10 @@ LayerResult PressureEqualizer::process_layer(LayerResult &&input)
|
||||
LayerResult *prev_layer_result = m_layer_results.front();
|
||||
m_layer_results.pop();
|
||||
|
||||
output_buffer_length = 0;
|
||||
output_buffer_length = 0;
|
||||
output_buffer_prev_length = 0;
|
||||
for (size_t line_idx = 0; line_idx < next_layer_first_idx; ++line_idx)
|
||||
output_gcode_line(m_gcode_lines[line_idx]);
|
||||
output_gcode_line(line_idx);
|
||||
m_gcode_lines.erase(m_gcode_lines.begin(), m_gcode_lines.begin() + int(next_layer_first_idx));
|
||||
|
||||
if (output_buffer_length > 0)
|
||||
@ -131,9 +135,9 @@ LayerResult PressureEqualizer::process_layer(LayerResult &&input)
|
||||
// Is a white space?
|
||||
static inline bool is_ws(const char c) { return c == ' ' || c == '\t'; }
|
||||
// Is it an end of line? Consider a comment to be an end of line as well.
|
||||
static inline bool is_eol(const char c) { return c == 0 || c == '\r' || c == '\n' || c == ';'; };
|
||||
static inline bool is_eol(const char c) { return c == 0 || c == '\r' || c == '\n' || c == ';'; }
|
||||
// Is it a white space or end of line?
|
||||
static inline bool is_ws_or_eol(const char c) { return is_ws(c) || is_eol(c); };
|
||||
static inline bool is_ws_or_eol(const char c) { return is_ws(c) || is_eol(c); }
|
||||
|
||||
// Eat whitespaces.
|
||||
static void eatws(const char *&line)
|
||||
@ -152,7 +156,7 @@ static inline int parse_int(const char *&line)
|
||||
throw Slic3r::InvalidArgument("PressureEqualizer: Error parsing an int");
|
||||
line = endptr;
|
||||
return int(result);
|
||||
};
|
||||
}
|
||||
|
||||
float string_to_float_decimal_point(const char *line, const size_t str_len, size_t* pos)
|
||||
{
|
||||
@ -173,7 +177,7 @@ static inline float parse_float(const char *&line, const size_t line_length)
|
||||
throw Slic3r::RuntimeError("PressureEqualizer: Error parsing a float");
|
||||
line = line + endptr;
|
||||
return result;
|
||||
};
|
||||
}
|
||||
|
||||
bool PressureEqualizer::process_line(const char *line, const char *line_end, GCodeLine &buf)
|
||||
{
|
||||
@ -209,6 +213,16 @@ bool PressureEqualizer::process_line(const char *line, const char *line_end, GCo
|
||||
buf.max_volumetric_extrusion_rate_slope_negative = 0.f;
|
||||
buf.extrusion_role = m_current_extrusion_role;
|
||||
|
||||
std::string str_line(line, line_end);
|
||||
const bool found_extrude_set_speed_tag = boost::contains(str_line, EXTRUDE_SET_SPEED_TAG);
|
||||
const bool found_extrude_end_tag = boost::contains(str_line, EXTRUDE_END_TAG);
|
||||
assert(!found_extrude_set_speed_tag || !found_extrude_end_tag);
|
||||
|
||||
if (found_extrude_set_speed_tag)
|
||||
this->opened_extrude_set_speed_block = true;
|
||||
else if (found_extrude_end_tag)
|
||||
this->opened_extrude_set_speed_block = false;
|
||||
|
||||
// Parse the G-code line, store the result into the buf.
|
||||
switch (toupper(*line ++)) {
|
||||
case 'G': {
|
||||
@ -228,6 +242,9 @@ bool PressureEqualizer::process_line(const char *line, const char *line_end, GCo
|
||||
case 1:
|
||||
{
|
||||
// G0, G1: A FFF 3D printer does not make a difference between the two.
|
||||
buf.adjustable_flow = this->opened_extrude_set_speed_block;
|
||||
buf.extrude_set_speed_tag = found_extrude_set_speed_tag;
|
||||
buf.extrude_end_tag = found_extrude_end_tag;
|
||||
float new_pos[5];
|
||||
memcpy(new_pos, m_current_pos, sizeof(float)*5);
|
||||
bool changed[5] = { false, false, false, false, false };
|
||||
@ -372,8 +389,9 @@ bool PressureEqualizer::process_line(const char *line, const char *line_end, GCo
|
||||
return true;
|
||||
}
|
||||
|
||||
void PressureEqualizer::output_gcode_line(GCodeLine &line)
|
||||
void PressureEqualizer::output_gcode_line(const size_t line_idx)
|
||||
{
|
||||
GCodeLine &line = m_gcode_lines[line_idx];
|
||||
if (!line.modified) {
|
||||
push_to_output(line.raw.data(), line.raw_length, true);
|
||||
return;
|
||||
@ -389,7 +407,7 @@ void PressureEqualizer::output_gcode_line(GCodeLine &line)
|
||||
// Emit the line with lowered extrusion rates.
|
||||
float l = line.dist_xyz();
|
||||
if (auto nSegments = size_t(ceil(l / max_segment_length)); nSegments == 1) { // Just update this segment.
|
||||
push_line_to_output(line, line.feedrate() * line.volumetric_correction_avg(), comment);
|
||||
push_line_to_output(line_idx, line.feedrate() * line.volumetric_correction_avg(), comment);
|
||||
} else {
|
||||
bool accelerating = line.volumetric_extrusion_rate_start < line.volumetric_extrusion_rate_end;
|
||||
// Update the initial and final feed rate values.
|
||||
@ -439,7 +457,7 @@ void PressureEqualizer::output_gcode_line(GCodeLine &line)
|
||||
line.pos_end[i] = pos_start[i] + (pos_end[i] - pos_start[i]) * t;
|
||||
line.pos_provided[i] = true;
|
||||
}
|
||||
push_line_to_output(line, pos_start[4], comment);
|
||||
push_line_to_output(line_idx, pos_start[4], comment);
|
||||
comment = nullptr;
|
||||
|
||||
float new_pos_start_feedrate = pos_start[4];
|
||||
@ -459,7 +477,7 @@ void PressureEqualizer::output_gcode_line(GCodeLine &line)
|
||||
line.pos_provided[j] = true;
|
||||
}
|
||||
// Interpolate the feed rate at the center of the segment.
|
||||
push_line_to_output(line, pos_start[4] + (pos_end[4] - pos_start[4]) * (float(i) - 0.5f) / float(nSegments), comment);
|
||||
push_line_to_output(line_idx, pos_start[4] + (pos_end[4] - pos_start[4]) * (float(i) - 0.5f) / float(nSegments), comment);
|
||||
comment = nullptr;
|
||||
memcpy(line.pos_start, line.pos_end, sizeof(float)*5);
|
||||
}
|
||||
@ -468,13 +486,13 @@ void PressureEqualizer::output_gcode_line(GCodeLine &line)
|
||||
line.pos_end[i] = pos_end2[i];
|
||||
line.pos_provided[i] = true;
|
||||
}
|
||||
push_line_to_output(line, pos_end[4], comment);
|
||||
push_line_to_output(line_idx, pos_end[4], comment);
|
||||
} else {
|
||||
for (int i = 0; i < 4; ++ i) {
|
||||
line.pos_end[i] = pos_end[i];
|
||||
line.pos_provided[i] = true;
|
||||
}
|
||||
push_line_to_output(line, pos_end[4], comment);
|
||||
push_line_to_output(line_idx, pos_end[4], comment);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -501,6 +519,11 @@ void PressureEqualizer::adjust_volumetric_rate()
|
||||
for (; !m_gcode_lines[idx_prev].extruding() && idx_prev != fist_line_idx; --idx_prev);
|
||||
if (!m_gcode_lines[idx_prev].extruding())
|
||||
break;
|
||||
// Don't decelerate before ironing and gap-fill.
|
||||
if (m_gcode_lines[line_idx].extrusion_role == erIroning || m_gcode_lines[line_idx].extrusion_role == erGapFill) {
|
||||
line_idx = idx_prev;
|
||||
continue;
|
||||
}
|
||||
// Volumetric extrusion rate at the start of the succeding segment.
|
||||
float rate_succ = m_gcode_lines[line_idx].volumetric_extrusion_rate_start;
|
||||
// What is the gradient of the extrusion rate between idx_prev and idx?
|
||||
@ -517,7 +540,7 @@ void PressureEqualizer::adjust_volumetric_rate()
|
||||
// Limit by the succeeding volumetric flow rate.
|
||||
rate_end = rate_succ;
|
||||
|
||||
if (line.extrusion_role == erExternalPerimeter || line.extrusion_role == erGapFill || line.extrusion_role == erBridgeInfill || line.extrusion_role == erIroning) {
|
||||
if (!line.adjustable_flow || line.extrusion_role == erExternalPerimeter || line.extrusion_role == erGapFill || line.extrusion_role == erBridgeInfill || line.extrusion_role == erIroning) {
|
||||
rate_end = line.volumetric_extrusion_rate_end;
|
||||
} else if (line.volumetric_extrusion_rate_end > rate_end) {
|
||||
line.volumetric_extrusion_rate_end = rate_end;
|
||||
@ -529,16 +552,20 @@ void PressureEqualizer::adjust_volumetric_rate()
|
||||
// Use the original, 'floating' extrusion rate as a starting point for the limiter.
|
||||
}
|
||||
|
||||
float rate_start = rate_end + rate_slope * line.time_corrected();
|
||||
if (rate_start < line.volumetric_extrusion_rate_start) {
|
||||
// Limit the volumetric extrusion rate at the start of this segment due to a segment
|
||||
// of ExtrusionType iRole, which will be extruded in the future.
|
||||
line.volumetric_extrusion_rate_start = rate_start;
|
||||
line.max_volumetric_extrusion_rate_slope_negative = rate_slope;
|
||||
line.modified = true;
|
||||
if (line.adjustable_flow) {
|
||||
float rate_start = rate_end + rate_slope * line.time_corrected();
|
||||
if (rate_start < line.volumetric_extrusion_rate_start) {
|
||||
// Limit the volumetric extrusion rate at the start of this segment due to a segment
|
||||
// of ExtrusionType iRole, which will be extruded in the future.
|
||||
line.volumetric_extrusion_rate_start = rate_start;
|
||||
line.max_volumetric_extrusion_rate_slope_negative = rate_slope;
|
||||
line.modified = true;
|
||||
}
|
||||
}
|
||||
// feedrate_per_extrusion_role[iRole] = (iRole == line.extrusion_role) ? line.volumetric_extrusion_rate_start : rate_start;
|
||||
feedrate_per_extrusion_role[iRole] = line.volumetric_extrusion_rate_start;
|
||||
// Don't store feed rate for ironing and gap-fill.
|
||||
if (line.extrusion_role != erIroning && line.extrusion_role != erGapFill)
|
||||
feedrate_per_extrusion_role[iRole] = line.volumetric_extrusion_rate_start;
|
||||
}
|
||||
}
|
||||
|
||||
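Read together with the forward pass in the next hunk, the limiter can be summarized in two inequalities; a compact note (with \(\dot V\) the volumetric rate, \(\Delta t\) a segment's corrected duration and \(s_-\), \(s_+\) the configured negative/positive slopes):

\[
\text{backward pass:}\quad \dot V_{\text{start}} \;\le\; \dot V_{\text{end}} + s_-\,\Delta t,
\qquad
\text{forward pass:}\quad \dot V_{\text{end}} \;\le\; \dot V_{\text{start}} + s_+\,\Delta t,
\]

i.e. the rate is clamped so that it never has to fall or rise faster than the allowed slope across adjacent extrusions; with this change, segments whose flow is not adjustable, and the boundaries around ironing and gap-fill, are skipped instead of being clamped.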
@ -551,6 +578,11 @@ void PressureEqualizer::adjust_volumetric_rate()
|
||||
for (; !m_gcode_lines[idx_next].extruding() && idx_next != last_line_idx; ++idx_next);
|
||||
if (!m_gcode_lines[idx_next].extruding())
|
||||
break;
|
||||
// Don't accelerate after ironing and gap-fill.
|
||||
if (m_gcode_lines[line_idx].extrusion_role == erIroning || m_gcode_lines[line_idx].extrusion_role == erGapFill) {
|
||||
line_idx = idx_next;
|
||||
continue;
|
||||
}
|
||||
float rate_prec = m_gcode_lines[line_idx].volumetric_extrusion_rate_end;
|
||||
// What is the gradient of the extrusion rate between idx_prev and idx?
|
||||
line_idx = idx_next;
|
||||
@ -562,7 +594,7 @@ void PressureEqualizer::adjust_volumetric_rate()
|
||||
continue; // The positive rate is unlimited or the rate for ExtrusionRole iRole is unlimited.
|
||||
|
||||
float rate_start = feedrate_per_extrusion_role[iRole];
|
||||
if (line.extrusion_role == erExternalPerimeter || line.extrusion_role == erGapFill || line.extrusion_role == erBridgeInfill || line.extrusion_role == erIroning) {
|
||||
if (!line.adjustable_flow || line.extrusion_role == erExternalPerimeter || line.extrusion_role == erGapFill || line.extrusion_role == erBridgeInfill || line.extrusion_role == erIroning) {
|
||||
rate_start = line.volumetric_extrusion_rate_start;
|
||||
} else if (iRole == line.extrusion_role && rate_prec < rate_start)
|
||||
rate_start = rate_prec;
|
||||
@ -575,16 +607,21 @@ void PressureEqualizer::adjust_volumetric_rate()
|
||||
} else {
|
||||
// Use the original, 'floating' extrusion rate as a starting point for the limiter.
|
||||
}
|
||||
float rate_end = rate_start + rate_slope * line.time_corrected();
|
||||
if (rate_end < line.volumetric_extrusion_rate_end) {
|
||||
// Limit the volumetric extrusion rate at the start of this segment due to a segment
|
||||
// of ExtrusionType iRole, which was extruded before.
|
||||
line.volumetric_extrusion_rate_end = rate_end;
|
||||
line.max_volumetric_extrusion_rate_slope_positive = rate_slope;
|
||||
line.modified = true;
|
||||
|
||||
if (line.adjustable_flow) {
|
||||
float rate_end = rate_start + rate_slope * line.time_corrected();
|
||||
if (rate_end < line.volumetric_extrusion_rate_end) {
|
||||
// Limit the volumetric extrusion rate at the start of this segment due to a segment
|
||||
// of ExtrusionType iRole, which was extruded before.
|
||||
line.volumetric_extrusion_rate_end = rate_end;
|
||||
line.max_volumetric_extrusion_rate_slope_positive = rate_slope;
|
||||
line.modified = true;
|
||||
}
|
||||
}
|
||||
// feedrate_per_extrusion_role[iRole] = (iRole == line.extrusion_role) ? line.volumetric_extrusion_rate_end : rate_end;
|
||||
feedrate_per_extrusion_role[iRole] = line.volumetric_extrusion_rate_end;
|
||||
// Don't store feed rate for ironing and gap-fill.
|
||||
if (line.extrusion_role != erIroning && line.extrusion_role != erGapFill)
|
||||
feedrate_per_extrusion_role[iRole] = line.volumetric_extrusion_rate_end;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -624,6 +661,7 @@ inline void PressureEqualizer::push_to_output(const char *text, const size_t len
|
||||
// Copy the text to the output.
|
||||
if (len != 0) {
|
||||
memcpy(output_buffer.data() + output_buffer_length, text, len);
|
||||
this->output_buffer_prev_length = this->output_buffer_length;
|
||||
output_buffer_length += len;
|
||||
}
|
||||
if (add_eol)
|
||||
@ -631,9 +669,22 @@ inline void PressureEqualizer::push_to_output(const char *text, const size_t len
|
||||
output_buffer[output_buffer_length] = 0;
|
||||
}
|
||||
|
||||
void PressureEqualizer::push_line_to_output(const GCodeLine &line, const float new_feedrate, const char *comment)
|
||||
inline bool PressureEqualizer::is_just_feedrate_provided(const GCodeLine &line)
|
||||
{
|
||||
push_to_output(EXTRUDE_END_TAG.data(), EXTRUDE_END_TAG.length(), true);
|
||||
return line.pos_provided[4] && !line.pos_provided[0] && !line.pos_provided[1] && !line.pos_provided[2] && !line.pos_provided[3];
|
||||
}
|
||||
|
||||
void PressureEqualizer::push_line_to_output(const size_t line_idx, const float new_feedrate, const char *comment)
|
||||
{
|
||||
const GCodeLine &line = this->m_gcode_lines[line_idx];
|
||||
if (line_idx > 0) {
|
||||
const GCodeLine &prev_line = this->m_gcode_lines[line_idx - 1];
|
||||
if (prev_line.extrude_set_speed_tag && this->is_just_feedrate_provided(prev_line))
|
||||
this->output_buffer_length = this->output_buffer_prev_length; // Remove the last line because it only sets the speed for an empty block of g-code lines, so it is useless.
|
||||
else
|
||||
push_to_output(EXTRUDE_END_TAG.data(), EXTRUDE_END_TAG.length(), true);
|
||||
} else
|
||||
push_to_output(EXTRUDE_END_TAG.data(), EXTRUDE_END_TAG.length(), true);
|
||||
|
||||
GCodeG1Formatter feedrate_formatter;
|
||||
feedrate_formatter.emit_f(new_feedrate);
|
||||
|
@ -81,6 +81,10 @@ private:
|
||||
bool m_retracted;
|
||||
bool m_use_relative_e_distances;
|
||||
|
||||
// Indicate if extrude set speed block was opened using the tag ";_EXTRUDE_SET_SPEED"
|
||||
// or not (not opened, or it was closed using the tag ";_EXTRUDE_END").
|
||||
bool opened_extrude_set_speed_block = false;
|
||||
|
||||
enum GCodeLineType {
|
||||
GCODELINETYPE_INVALID,
|
||||
GCODELINETYPE_NOOP,
|
||||
@ -139,7 +143,7 @@ private:
|
||||
// X,Y,Z,E,F. Storing the state of the currently active extruder only.
|
||||
float pos_start[5];
|
||||
float pos_end[5];
|
||||
// Was the axis found on the G-code line? X,Y,Z,F
|
||||
// Was the axis found on the G-code line? X,Y,Z,E,F
|
||||
bool pos_provided[5];
|
||||
|
||||
// Index of the active extruder.
|
||||
@ -158,11 +162,17 @@ private:
|
||||
// If set to zero, the slope is unlimited.
|
||||
float max_volumetric_extrusion_rate_slope_positive;
|
||||
float max_volumetric_extrusion_rate_slope_negative;
|
||||
|
||||
bool adjustable_flow = false;
|
||||
|
||||
bool extrude_set_speed_tag = false;
|
||||
bool extrude_end_tag = false;
|
||||
};
|
||||
|
||||
// Output buffer will only grow. It will not be reallocated over and over.
|
||||
std::vector<char> output_buffer;
|
||||
size_t output_buffer_length;
|
||||
size_t output_buffer_prev_length;
|
||||
|
||||
#ifdef PRESSURE_EQUALIZER_DEBUG
|
||||
// For debugging purposes. Index of the G-code line processed.
|
||||
@ -170,7 +180,7 @@ private:
|
||||
#endif
|
||||
|
||||
bool process_line(const char *line, const char *line_end, GCodeLine &buf);
|
||||
void output_gcode_line(GCodeLine &buf);
|
||||
void output_gcode_line(size_t line_idx);
|
||||
|
||||
// Go back from the current circular_buffer_pos and lower the feedtrate to decrease the slope of the extrusion rate changes.
|
||||
// Then go forward and adjust the feedrate to decrease the slope of the extrusion rate changes.
|
||||
@ -181,7 +191,9 @@ private:
|
||||
inline void push_to_output(const std::string &text, bool add_eol);
|
||||
inline void push_to_output(const char *text, size_t len, bool add_eol = true);
|
||||
// Push a G-code line to the output.
|
||||
void push_line_to_output(const GCodeLine &line, float new_feedrate, const char *comment);
|
||||
void push_line_to_output(size_t line_idx, float new_feedrate, const char *comment);
|
||||
|
||||
inline bool is_just_feedrate_provided(const GCodeLine &line);
|
||||
|
||||
public:
|
||||
std::queue<LayerResult*> m_layer_results;
|
||||
|
@ -319,7 +319,7 @@ struct GlobalModelInfo {
|
||||
return 1.0f;
|
||||
}
|
||||
|
||||
auto compute_dist_to_plane = [](const Vec3f& position, const Vec3f& plane_origin, const Vec3f& plane_normal) {
|
||||
auto compute_dist_to_plane = [](const Vec3f &position, const Vec3f &plane_origin, const Vec3f &plane_normal) {
|
||||
Vec3f orig_to_point = position - plane_origin;
|
||||
return std::abs(orig_to_point.dot(plane_normal));
|
||||
};
|
||||
@ -403,9 +403,9 @@ Polygons extract_perimeter_polygons(const Layer *layer, const SeamPosition confi
|
||||
if (ex_entity->is_collection()) { //collection of inner, outer, and overhang perimeters
|
||||
for (const ExtrusionEntity *perimeter : static_cast<const ExtrusionEntityCollection*>(ex_entity)->entities) {
|
||||
ExtrusionRole role = perimeter->role();
|
||||
if (perimeter->is_loop()){
|
||||
for (const ExtrusionPath& path : static_cast<const ExtrusionLoop*>(perimeter)->paths){
|
||||
if (path.role() == ExtrusionRole::erExternalPerimeter){
|
||||
if (perimeter->is_loop()) {
|
||||
for (const ExtrusionPath &path : static_cast<const ExtrusionLoop*>(perimeter)->paths) {
|
||||
if (path.role() == ExtrusionRole::erExternalPerimeter) {
|
||||
role = ExtrusionRole::erExternalPerimeter;
|
||||
}
|
||||
}
|
||||
@ -449,7 +449,7 @@ Polygons extract_perimeter_polygons(const Layer *layer, const SeamPosition confi
|
||||
//each SeamCandidate also contains pointer to shared Perimeter structure representing the polygon
|
||||
// if Custom Seam modifiers are present, oversamples the polygon if necessary to better fit user intentions
|
||||
void process_perimeter_polygon(const Polygon &orig_polygon, float z_coord, const LayerRegion *region,
|
||||
bool arachne_generated, const GlobalModelInfo &global_model_info, PrintObjectSeamData::LayerSeams &result) {
|
||||
const GlobalModelInfo &global_model_info, PrintObjectSeamData::LayerSeams &result) {
|
||||
if (orig_polygon.size() == 0) {
|
||||
return;
|
||||
}
|
||||
@ -464,26 +464,6 @@ void process_perimeter_polygon(const Polygon &orig_polygon, float z_coord, const
|
||||
std::vector<float> polygon_angles = calculate_polygon_angles_at_vertices(polygon, lengths,
|
||||
SeamPlacer::polygon_local_angles_arm_distance);
|
||||
|
||||
// resample smooth surfaces from arachne, so that alignment finds short path down, and does not create unnecesary curves
|
||||
if (arachne_generated && std::all_of(polygon_angles.begin(), polygon_angles.end(), [](float angle) {
|
||||
return compute_angle_penalty(angle) > SeamPlacer::sharp_angle_penalty_snapping_threshold;
|
||||
})) {
|
||||
float total_dist = std::accumulate(lengths.begin(), lengths.end(), 0.0f);
|
||||
float avg_dist = total_dist / float(lengths.size());
|
||||
if (avg_dist < SeamPlacer::seam_align_tolerable_dist * 2.0f){
|
||||
coord_t sampling_dist = scaled(avg_dist*0.2f);
|
||||
|
||||
polygon.points = polygon.equally_spaced_points(sampling_dist);
|
||||
lengths.clear();
|
||||
for (size_t point_idx = 0; point_idx < polygon.size() - 1; ++point_idx) {
|
||||
lengths.push_back((unscale(polygon[point_idx]) - unscale(polygon[point_idx + 1])).norm());
|
||||
}
|
||||
lengths.push_back(std::max((unscale(polygon[0]) - unscale(polygon[polygon.size() - 1])).norm(), 0.1));
|
||||
polygon_angles = calculate_polygon_angles_at_vertices(polygon, lengths, avg_dist);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
result.perimeters.push_back( { });
|
||||
Perimeter &perimeter = result.perimeters.back();
|
||||
|
||||
@ -540,50 +520,72 @@ void process_perimeter_polygon(const Polygon &orig_polygon, float z_coord, const
|
||||
result.points.emplace_back(position, perimeter, local_ccw_angle, type);
|
||||
}
|
||||
|
||||
perimeter.end_index = result.points.size() - 1;
|
||||
perimeter.end_index = result.points.size();
|
||||
|
||||
// We will find first patch of enforced points (patch: continuous section of enforced points) and select the middle
|
||||
// point, which will have priority during alignment
|
||||
// If there are multiple enforced patches in the perimeter, others are ignored
|
||||
if (some_point_enforced) {
|
||||
size_t perimeter_size = perimeter.end_index - perimeter.start_index + 1;
|
||||
// We will find patches of enforced points (patch: a continuous section of enforced points), choose
|
||||
// the longest patch, and select the middle point or sharp point (depending on the angle)
|
||||
// this point will have high priority on this perimeter
|
||||
size_t perimeter_size = perimeter.end_index - perimeter.start_index;
|
||||
const auto next_index = [&](size_t idx) {
|
||||
return perimeter.start_index + Slic3r::next_idx_modulo(idx - perimeter.start_index, perimeter_size);
|
||||
};
|
||||
|
||||
size_t first_enforced_idx = perimeter.start_index;
|
||||
for (size_t _ = 0; _ < perimeter_size; ++_) {
|
||||
if (result.points[first_enforced_idx].type != EnforcedBlockedSeamPoint::Enforced &&
|
||||
result.points[next_index(first_enforced_idx)].type == EnforcedBlockedSeamPoint::Enforced) {
|
||||
break;
|
||||
std::vector<size_t> patches_starts_ends;
|
||||
for (size_t i = perimeter.start_index; i < perimeter.end_index; ++i) {
|
||||
if (result.points[i].type != EnforcedBlockedSeamPoint::Enforced &&
|
||||
result.points[next_index(i)].type == EnforcedBlockedSeamPoint::Enforced) {
|
||||
patches_starts_ends.push_back(next_index(i));
|
||||
}
|
||||
if (result.points[i].type == EnforcedBlockedSeamPoint::Enforced &&
|
||||
result.points[next_index(i)].type != EnforcedBlockedSeamPoint::Enforced) {
|
||||
patches_starts_ends.push_back(next_index(i));
|
||||
}
|
||||
first_enforced_idx = next_index(first_enforced_idx);
|
||||
}
|
||||
first_enforced_idx = next_index(first_enforced_idx);
|
||||
|
||||
// Gather also points with large angles (these are points from the original mesh, since oversampled points have zero angle)
|
||||
// If there are any, the middle point will be picked from those (makes drawing over sharp corners easier)
|
||||
std::vector<size_t> orig_large_angle_points_indices { };
|
||||
std::vector<size_t> viable_points_indices { };
|
||||
size_t last_enforced_idx = first_enforced_idx;
|
||||
for (size_t _ = 0; _ < perimeter_size; ++_) {
|
||||
if (result.points[last_enforced_idx].type != EnforcedBlockedSeamPoint::Enforced) {
|
||||
break;
|
||||
//if patches_starts_ends are empty, it means that the whole perimeter is enforced.. don't do anything in that case
|
||||
if (!patches_starts_ends.empty()) {
|
||||
//if the first point in the patches is not enforced, it marks a patch end. in that case, put it to the end and start on next
|
||||
// to simplify the processing
|
||||
assert(patches_starts_ends.size() % 2 == 0);
|
||||
bool start_on_second = false;
|
||||
if (result.points[patches_starts_ends[0]].type != EnforcedBlockedSeamPoint::Enforced) {
|
||||
start_on_second = true;
|
||||
patches_starts_ends.push_back(patches_starts_ends[0]);
|
||||
}
|
||||
viable_points_indices.push_back(last_enforced_idx);
|
||||
if (compute_angle_penalty(result.points[last_enforced_idx].local_ccw_angle)
|
||||
< SeamPlacer::sharp_angle_penalty_snapping_threshold) {
|
||||
orig_large_angle_points_indices.push_back(last_enforced_idx);
|
||||
//now pick the longest patch
|
||||
std::pair<size_t, size_t> longest_patch { 0, 0 };
|
||||
auto patch_len = [perimeter_size](const std::pair<size_t, size_t> &start_end) {
|
||||
if (start_end.second < start_end.first) {
|
||||
return start_end.first + (perimeter_size - start_end.second);
|
||||
} else {
|
||||
return start_end.second - start_end.first;
|
||||
}
|
||||
};
|
||||
for (size_t patch_idx = start_on_second ? 1 : 0; patch_idx < patches_starts_ends.size(); patch_idx += 2) {
|
||||
std::pair<size_t, size_t> current_patch { patches_starts_ends[patch_idx], patches_starts_ends[patch_idx
|
||||
+ 1] };
|
||||
if (patch_len(longest_patch) < patch_len(current_patch)) {
|
||||
longest_patch = current_patch;
|
||||
}
|
||||
}
|
||||
std::vector<size_t> viable_points_indices;
|
||||
std::vector<size_t> large_angle_points_indices;
|
||||
for (size_t point_idx = longest_patch.first; point_idx != longest_patch.second;
|
||||
point_idx = next_index(point_idx)) {
|
||||
viable_points_indices.push_back(point_idx);
|
||||
if (std::abs(result.points[point_idx].local_ccw_angle)
|
||||
> SeamPlacer::sharp_angle_snapping_threshold) {
|
||||
large_angle_points_indices.push_back(point_idx);
|
||||
}
|
||||
}
|
||||
assert(viable_points_indices.size() > 0);
|
||||
if (large_angle_points_indices.empty()) {
|
||||
size_t central_idx = viable_points_indices[viable_points_indices.size() / 2];
|
||||
result.points[central_idx].central_enforcer = true;
|
||||
} else {
|
||||
size_t central_idx = large_angle_points_indices.size() / 2;
|
||||
result.points[large_angle_points_indices[central_idx]].central_enforcer = true;
|
||||
}
|
||||
last_enforced_idx = next_index(last_enforced_idx);
|
||||
}
|
||||
assert(viable_points_indices.size() > 0);
|
||||
if (orig_large_angle_points_indices.empty()) {
|
||||
size_t central_idx = viable_points_indices[viable_points_indices.size() / 2];
|
||||
result.points[central_idx].central_enforcer = true;
|
||||
} else {
|
||||
size_t central_idx = orig_large_angle_points_indices.size() / 2;
|
||||
result.points[orig_large_angle_points_indices[central_idx]].central_enforcer = true;
|
||||
}
|
||||
}
|
||||
|
||||
@ -603,7 +605,7 @@ std::pair<size_t, size_t> find_previous_and_next_perimeter_point(const std::vect
|
||||
prev = current.perimeter.end_index;
|
||||
}
|
||||
|
||||
if (point_index == current.perimeter.end_index) {
|
||||
if (point_index == current.perimeter.end_index - 1) {
|
||||
// if point_index is equal to end, than next neighbour is at the start
|
||||
next = current.perimeter.start_index;
|
||||
}
|
||||
@ -731,7 +733,8 @@ struct SeamComparator {
|
||||
float angle_importance;
|
||||
explicit SeamComparator(SeamPosition setup) :
|
||||
setup(setup) {
|
||||
angle_importance = setup == spNearest ? SeamPlacer::angle_importance_nearest : SeamPlacer::angle_importance_aligned;
|
||||
angle_importance =
|
||||
setup == spNearest ? SeamPlacer::angle_importance_nearest : SeamPlacer::angle_importance_aligned;
|
||||
}
|
||||
|
||||
// Standard comparator, must respect the requirements of comparators (e.g. give same result on same inputs) for sorting usage
|
||||
@ -748,8 +751,7 @@ struct SeamComparator {
|
||||
}
|
||||
|
||||
//avoid overhangs
|
||||
if (a.overhang > SeamPlacer::overhang_distance_tolerance_factor * a.perimeter.flow_width ||
|
||||
b.overhang > SeamPlacer::overhang_distance_tolerance_factor * b.perimeter.flow_width) {
|
||||
if (a.overhang > 0.0f || b.overhang > 0.0f) {
|
||||
return a.overhang < b.overhang;
|
||||
}
|
||||
|
||||
@ -773,10 +775,10 @@ struct SeamComparator {
|
||||
}
|
||||
|
||||
// The penalties are kept close to the range [0, 1.x]; however, this should not be relied upon
|
||||
float penalty_a = a.visibility +
|
||||
float penalty_a = a.overhang + a.visibility +
|
||||
angle_importance * compute_angle_penalty(a.local_ccw_angle)
|
||||
+ distance_penalty_a;
|
||||
float penalty_b = b.visibility +
|
||||
float penalty_b = b.overhang + b.visibility +
|
||||
angle_importance * compute_angle_penalty(b.local_ccw_angle)
|
||||
+ distance_penalty_b;
|
||||
|
||||
@ -806,8 +808,8 @@ struct SeamComparator {
|
||||
}
|
||||
|
||||
//avoid overhangs
|
||||
if (a.overhang > SeamPlacer::overhang_distance_tolerance_factor * a.perimeter.flow_width ||
|
||||
b.overhang > SeamPlacer::overhang_distance_tolerance_factor * b.perimeter.flow_width) {
|
||||
if ((a.overhang > 0.0f || b.overhang > 0.0f)
|
||||
&& abs(a.overhang - b.overhang) > (0.1f * a.perimeter.flow_width)) {
|
||||
return a.overhang < b.overhang;
|
||||
}
|
||||
|
||||
@ -827,9 +829,9 @@ struct SeamComparator {
|
||||
return a.position.y() + SeamPlacer::seam_align_score_tolerance * 5.0f > b.position.y();
|
||||
}
|
||||
|
||||
float penalty_a = a.visibility
|
||||
float penalty_a = a.overhang + a.visibility
|
||||
+ angle_importance * compute_angle_penalty(a.local_ccw_angle);
|
||||
float penalty_b = b.visibility +
|
||||
float penalty_b = b.overhang + b.visibility +
|
||||
angle_importance * compute_angle_penalty(b.local_ccw_angle);
|
||||
|
||||
return penalty_a <= penalty_b || penalty_a - penalty_b < SeamPlacer::seam_align_score_tolerance;
|
||||
@ -838,13 +840,6 @@ struct SeamComparator {
|
||||
bool are_similar(const SeamCandidate &a, const SeamCandidate &b) const {
|
||||
return is_first_not_much_worse(a, b) && is_first_not_much_worse(b, a);
|
||||
}
|
||||
|
||||
float weight(const SeamCandidate &a) const {
|
||||
if (setup == SeamPosition::spAligned && a.central_enforcer) {
|
||||
return 2.0f;
|
||||
}
|
||||
return a.visibility + angle_importance * compute_angle_penalty(a.local_ccw_angle) / (1.0f + angle_importance);
|
||||
}
|
||||
};
|
||||
|
||||
#ifdef DEBUG_FILES
|
||||
@ -868,8 +863,8 @@ void debug_export_points(const std::vector<PrintObjectSeamData::LayerSeams> &lay
|
||||
min_vis = std::min(min_vis, point.visibility);
|
||||
max_vis = std::max(max_vis, point.visibility);
|
||||
|
||||
min_weight = std::min(min_weight, -comparator.compute_angle_penalty(point.local_ccw_angle));
|
||||
max_weight = std::max(max_weight, -comparator.compute_angle_penalty(point.local_ccw_angle));
|
||||
min_weight = std::min(min_weight, -compute_angle_penalty(point.local_ccw_angle));
|
||||
max_weight = std::max(max_weight, -compute_angle_penalty(point.local_ccw_angle));
|
||||
|
||||
}
|
||||
|
||||
@ -890,7 +885,7 @@ void debug_export_points(const std::vector<PrintObjectSeamData::LayerSeams> &lay
|
||||
visibility_svg.draw(scaled(Vec2f(point.position.head<2>())), visibility_fill);
|
||||
|
||||
Vec3i weight_color = value_to_rgbi(min_weight, max_weight,
|
||||
-comparator.compute_angle_penalty(point.local_ccw_angle));
|
||||
-compute_angle_penalty(point.local_ccw_angle));
|
||||
std::string weight_fill = "rgb(" + std::to_string(weight_color.x()) + "," + std::to_string(weight_color.y())
|
||||
+ ","
|
||||
+ std::to_string(weight_color.z()) + ")";
|
||||
@ -913,7 +908,7 @@ void pick_seam_point(std::vector<SeamCandidate> &perimeter_points, size_t start_
|
||||
size_t end_index = perimeter_points[start_index].perimeter.end_index;
|
||||
|
||||
size_t seam_index = start_index;
|
||||
for (size_t index = start_index; index <= end_index; ++index) {
|
||||
for (size_t index = start_index; index < end_index; ++index) {
|
||||
if (comparator.is_first_better(perimeter_points[index], perimeter_points[seam_index])) {
|
||||
seam_index = index;
|
||||
}
|
||||
@ -927,7 +922,7 @@ size_t pick_nearest_seam_point_index(const std::vector<SeamCandidate> &perimeter
|
||||
SeamComparator comparator { spNearest };
|
||||
|
||||
size_t seam_index = start_index;
|
||||
for (size_t index = start_index; index <= end_index; ++index) {
|
||||
for (size_t index = start_index; index < end_index; ++index) {
|
||||
if (comparator.is_first_better(perimeter_points[index], perimeter_points[seam_index], preffered_location)) {
|
||||
seam_index = index;
|
||||
}
|
||||
@ -954,10 +949,10 @@ void pick_random_seam_point(const std::vector<SeamCandidate> &perimeter_points,
|
||||
};
|
||||
std::vector<Viable> viables;
|
||||
|
||||
for (size_t index = start_index; index <= end_index; ++index) {
|
||||
for (size_t index = start_index; index < end_index; ++index) {
|
||||
if (comparator.are_similar(perimeter_points[index], perimeter_points[viable_example_index])) {
|
||||
// index ok, push info into viables
|
||||
Vec3f edge_to_next { perimeter_points[index == end_index ? start_index : index + 1].position
|
||||
Vec3f edge_to_next { perimeter_points[index == end_index - 1 ? start_index : index + 1].position
|
||||
- perimeter_points[index].position };
|
||||
float dist_to_next = edge_to_next.norm();
|
||||
viables.push_back( { index, dist_to_next, edge_to_next });
|
||||
@ -970,7 +965,7 @@ void pick_random_seam_point(const std::vector<SeamCandidate> &perimeter_points,
|
||||
viable_example_index = index;
|
||||
viables.clear();
|
||||
|
||||
Vec3f edge_to_next = (perimeter_points[index == end_index ? start_index : index + 1].position
|
||||
Vec3f edge_to_next = (perimeter_points[index == end_index - 1 ? start_index : index + 1].position
|
||||
- perimeter_points[index].position);
|
||||
float dist_to_next = edge_to_next.norm();
|
||||
viables.push_back( { index, dist_to_next, edge_to_next });
|
||||
@ -1024,8 +1019,8 @@ public:
|
||||
|
||||
float distance_from_perimeter(const Point &point) const {
|
||||
Vec2d p = unscale(point);
|
||||
size_t hit_idx_out;
|
||||
Vec2d hit_point_out;
|
||||
size_t hit_idx_out { };
|
||||
Vec2d hit_point_out = Vec2d::Zero();
|
||||
auto distance = AABBTreeLines::squared_distance_to_indexed_lines(lines, tree, p, hit_idx_out, hit_point_out);
|
||||
if (distance < 0) {
|
||||
return std::numeric_limits<float>::max();
|
||||
@ -1051,14 +1046,12 @@ public:
|
||||
void SeamPlacer::gather_seam_candidates(const PrintObject *po,
|
||||
const SeamPlacerImpl::GlobalModelInfo &global_model_info, const SeamPosition configured_seam_preference) {
|
||||
using namespace SeamPlacerImpl;
|
||||
bool arachne_generated = po->config().perimeter_generator == PerimeterGeneratorType::Arachne;
|
||||
|
||||
PrintObjectSeamData &seam_data = m_seam_per_object.emplace(po, PrintObjectSeamData { }).first->second;
|
||||
seam_data.layers.resize(po->layer_count());
|
||||
|
||||
tbb::parallel_for(tbb::blocked_range<size_t>(0, po->layers().size()),
|
||||
[po, configured_seam_preference, arachne_generated, &global_model_info, &seam_data]
|
||||
(tbb::blocked_range<size_t> r) {
|
||||
[po, configured_seam_preference, &global_model_info, &seam_data]
|
||||
(tbb::blocked_range<size_t> r) {
|
||||
for (size_t layer_idx = r.begin(); layer_idx < r.end(); ++layer_idx) {
|
||||
PrintObjectSeamData::LayerSeams &layer_seams = seam_data.layers[layer_idx];
|
||||
const Layer *layer = po->get_layer(layer_idx);
|
||||
@ -1068,7 +1061,7 @@ void SeamPlacer::gather_seam_candidates(const PrintObject *po,
|
||||
Polygons polygons = extract_perimeter_polygons(layer, configured_seam_preference, regions);
|
||||
for (size_t poly_index = 0; poly_index < polygons.size(); ++poly_index) {
|
||||
process_perimeter_polygon(polygons[poly_index], unscaled_z,
|
||||
regions[poly_index], arachne_generated, global_model_info, layer_seams);
|
||||
regions[poly_index], global_model_info, layer_seams);
|
||||
}
|
||||
auto functor = SeamCandidateCoordinateFunctor { layer_seams.points };
|
||||
seam_data.layers[layer_idx].points_tree =
|
||||
@ -1119,11 +1112,18 @@ void SeamPlacer::calculate_overhangs_and_layer_embedding(const PrintObject *po)
|
||||
for (SeamCandidate &perimeter_point : layers[layer_idx].points) {
|
||||
Point point = Point::new_scale(Vec2f { perimeter_point.position.head<2>() });
|
||||
if (prev_layer_distancer.get() != nullptr) {
|
||||
perimeter_point.overhang = prev_layer_distancer->distance_from_perimeter(point);
|
||||
perimeter_point.overhang = (prev_layer_distancer->distance_from_perimeter(point)
|
||||
+ 0.5f * perimeter_point.perimeter.flow_width
|
||||
- tan(SeamPlacer::overhang_angle_threshold)
|
||||
* po->layers()[layer_idx]->height)
|
||||
/ (3.0f * perimeter_point.perimeter.flow_width);
|
||||
//NOTE disables the feature to place seams on slowly decreasing areas. Remove the following line to enable.
|
||||
perimeter_point.overhang = perimeter_point.overhang < 0.0f ? 0.0f : perimeter_point.overhang;
|
||||
}
|
||||
|
||||
if (should_compute_layer_embedding) { // search for embedded perimeter points (points hidden inside the print ,e.g. multimaterial join, best position for seam)
|
||||
perimeter_point.embedded_distance = current_layer_distancer->distance_from_perimeter(point);
|
||||
perimeter_point.embedded_distance = current_layer_distancer->distance_from_perimeter(point)
|
||||
+ 0.5f * perimeter_point.perimeter.flow_width;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1142,7 +1142,7 @@ void SeamPlacer::calculate_overhangs_and_layer_embedding(const PrintObject *po)
|
||||
// Used by align_seam_points().
|
||||
std::optional<std::pair<size_t, size_t>> SeamPlacer::find_next_seam_in_layer(
|
||||
const std::vector<PrintObjectSeamData::LayerSeams> &layers,
|
||||
const Vec3f& projected_position,
|
||||
const Vec3f &projected_position,
|
||||
const size_t layer_idx, const float max_distance,
|
||||
const SeamPlacerImpl::SeamComparator &comparator) const {
|
||||
using namespace SeamPlacerImpl;
|
||||
@ -1205,9 +1205,7 @@ std::optional<std::pair<size_t, size_t>> SeamPlacer::find_next_seam_in_layer(
|
||||
}
|
||||
|
||||
std::vector<std::pair<size_t, size_t>> SeamPlacer::find_seam_string(const PrintObject *po,
|
||||
std::pair<size_t, size_t> start_seam, const SeamPlacerImpl::SeamComparator &comparator,
|
||||
float& string_weight) const {
|
||||
string_weight = 0.0f;
|
||||
std::pair<size_t, size_t> start_seam, const SeamPlacerImpl::SeamComparator &comparator) const {
|
||||
const std::vector<PrintObjectSeamData::LayerSeams> &layers = m_seam_per_object.find(po)->second.layers;
|
||||
int layer_idx = start_seam.first;
|
||||
|
||||
@ -1230,7 +1228,8 @@ std::vector<std::pair<size_t, size_t>> SeamPlacer::find_seam_string(const PrintO
|
||||
break;
|
||||
}
|
||||
}
|
||||
float max_distance = SeamPlacer::seam_align_tolerable_dist;
|
||||
float max_distance = SeamPlacer::seam_align_tolerable_dist_factor *
|
||||
layers[start_seam.first].points[start_seam.second].perimeter.flow_width;
|
||||
Vec3f prev_position = layers[prev_point_index.first].points[prev_point_index.second].position;
|
||||
Vec3f projected_position = prev_position;
|
||||
projected_position.z() = float(po->get_layer(next_layer)->slice_z);
|
||||
@ -1241,11 +1240,6 @@ std::vector<std::pair<size_t, size_t>> SeamPlacer::find_seam_string(const PrintO
|
||||
|
||||
if (maybe_next_seam.has_value()) {
|
||||
// For old macOS (pre 10.14), std::optional does not have .value() method, so the code is using operator*() instead.
|
||||
std::pair<size_t, size_t> next_seam_coords = maybe_next_seam.operator*();
|
||||
const auto &next_seam = layers[next_seam_coords.first].points[next_seam_coords.second];
|
||||
bool is_moved = next_seam.perimeter.seam_index != next_seam_coords.second;
|
||||
string_weight += comparator.weight(next_seam) -
|
||||
is_moved ? comparator.weight(layers[next_seam_coords.first].points[next_seam.perimeter.seam_index]) : 0.0f;
|
||||
seam_string.push_back(maybe_next_seam.operator*());
|
||||
prev_point_index = seam_string.back();
|
||||
//String added, prev_point_index updated
|
||||
@ -1253,15 +1247,14 @@ std::vector<std::pair<size_t, size_t>> SeamPlacer::find_seam_string(const PrintO
|
||||
if (step == 1) {
|
||||
reverse_lookup_direction();
|
||||
if (next_layer < 0) {
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
next_layer += step;
|
||||
}
|
||||
|
||||
return seam_string;
|
||||
}
|
||||
|
||||
@ -1300,7 +1293,7 @@ void SeamPlacer::align_seam_points(const PrintObject *po, const SeamPlacerImpl::
|
||||
size_t current_point_index = 0;
|
||||
while (current_point_index < layer_perimeter_points.size()) {
|
||||
seams.emplace_back(layer_idx, layer_perimeter_points[current_point_index].perimeter.seam_index);
|
||||
current_point_index = layer_perimeter_points[current_point_index].perimeter.end_index + 1;
|
||||
current_point_index = layer_perimeter_points[current_point_index].perimeter.end_index;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1331,18 +1324,15 @@ void SeamPlacer::align_seam_points(const PrintObject *po, const SeamPlacerImpl::
|
||||
// This perimeter is already aligned, skip seam
|
||||
continue;
|
||||
} else {
|
||||
float seam_string_weight;
|
||||
seam_string = this->find_seam_string(po, { layer_idx, seam_index }, comparator, seam_string_weight);
|
||||
seam_string = this->find_seam_string(po, { layer_idx, seam_index }, comparator);
|
||||
size_t step_size = 1 + seam_string.size() / 20;
|
||||
for (size_t alternative_start = 0; alternative_start < seam_string.size(); alternative_start+=step_size) {
|
||||
float alternative_seam_string_weight = 0;
|
||||
for (size_t alternative_start = 0; alternative_start < seam_string.size(); alternative_start += step_size) {
|
||||
size_t start_layer_idx = seam_string[alternative_start].first;
|
||||
size_t seam_idx = layers[start_layer_idx].points[seam_string[alternative_start].second].perimeter.seam_index;
|
||||
alternative_seam_string = this->find_seam_string(po, std::pair<size_t,size_t>(start_layer_idx, seam_idx), comparator,
|
||||
alternative_seam_string_weight);
|
||||
if (alternative_seam_string.size() >= SeamPlacer::seam_align_minimum_string_seams &&
|
||||
alternative_seam_string_weight > seam_string_weight) {
|
||||
seam_string_weight = alternative_seam_string_weight;
|
||||
size_t seam_idx =
|
||||
layers[start_layer_idx].points[seam_string[alternative_start].second].perimeter.seam_index;
|
||||
alternative_seam_string = this->find_seam_string(po,
|
||||
std::pair<size_t, size_t>(start_layer_idx, seam_idx), comparator);
|
||||
if (alternative_seam_string.size() > seam_string.size()) {
|
||||
seam_string = std::move(alternative_seam_string);
|
||||
}
|
||||
}
|
||||
@ -1361,36 +1351,60 @@ void SeamPlacer::align_seam_points(const PrintObject *po, const SeamPlacerImpl::
|
||||
//repeat the alignment for the current seam, since it could be skipped due to alternative path being aligned.
|
||||
global_index--;
|
||||
|
||||
// gather all positions of seams and their weights (weights are derived as negative penalty, they are made positive in next step)
|
||||
// gather all positions of seams and their weights
|
||||
observations.resize(seam_string.size());
|
||||
observation_points.resize(seam_string.size());
|
||||
weights.resize(seam_string.size());
|
||||
|
||||
auto angle_3d = [](const Vec3f& a, const Vec3f& b){
|
||||
return std::abs(acosf(a.normalized().dot(b.normalized())));
|
||||
};
|
||||
|
||||
auto angle_weight = [](float angle){
|
||||
return 1.0f / (0.1f + compute_angle_penalty(angle));
|
||||
};
|
||||
|
||||
//gather points positions and weights
|
||||
float total_length = 0.0f;
|
||||
Vec3f last_point_pos = layers[seam_string[0].first].points[seam_string[0].second].position;
|
||||
for (size_t index = 0; index < seam_string.size(); ++index) {
|
||||
Vec3f pos = layers[seam_string[index].first].points[seam_string[index].second].position;
|
||||
total_length += (last_point_pos - pos).norm();
|
||||
last_point_pos = pos;
|
||||
observations[index] = pos.head<2>();
|
||||
observation_points[index] = pos.z();
|
||||
weights[index] = comparator.weight(layers[seam_string[index].first].points[seam_string[index].second]);
|
||||
const SeamCandidate ¤t = layers[seam_string[index].first].points[seam_string[index].second];
|
||||
float layer_angle = 0.0f;
|
||||
if (index > 0 && index < seam_string.size() - 1) {
|
||||
layer_angle = angle_3d(
|
||||
current.position
|
||||
- layers[seam_string[index - 1].first].points[seam_string[index - 1].second].position,
|
||||
layers[seam_string[index + 1].first].points[seam_string[index + 1].second].position
|
||||
- current.position
|
||||
);
|
||||
}
|
||||
observations[index] = current.position.head<2>();
|
||||
observation_points[index] = current.position.z();
|
||||
weights[index] = angle_weight(current.local_ccw_angle);
|
||||
float sign = layer_angle > 2.0 * std::abs(current.local_ccw_angle) ? -1.0f : 1.0f;
|
||||
if (current.type == EnforcedBlockedSeamPoint::Enforced) {
|
||||
sign = 1.0f;
|
||||
weights[index] += 3.0f;
|
||||
}
|
||||
total_length += sign * (last_point_pos - current.position).norm();
|
||||
last_point_pos = current.position;
|
||||
}
|
||||
|
||||
// Curve Fitting
|
||||
size_t number_of_segments = std::max(size_t(1),
|
||||
size_t(total_length / SeamPlacer::seam_align_mm_per_segment));
|
||||
size_t(std::max(0.0f,total_length) / SeamPlacer::seam_align_mm_per_segment));
|
||||
auto curve = Geometry::fit_cubic_bspline(observations, observation_points, weights, number_of_segments);
|
||||
|
||||
// Do alignment - compute fitted point for each point in the string from its Z coord, and store the position into
|
||||
// Perimeter structure of the point; also set flag aligned to true
|
||||
for (size_t index = 0; index < seam_string.size(); ++index) {
|
||||
const auto &pair = seam_string[index];
|
||||
const float t =
|
||||
compute_angle_penalty(layers[pair.first].points[pair.second].local_ccw_angle)
|
||||
< SeamPlacer::sharp_angle_penalty_snapping_threshold
|
||||
? 0.8f : 0.0f;
|
||||
float t = std::min(1.0f, std::abs(layers[pair.first].points[pair.second].local_ccw_angle)
|
||||
/ SeamPlacer::sharp_angle_snapping_threshold);
|
||||
if (layers[pair.first].points[pair.second].type == EnforcedBlockedSeamPoint::Enforced){
|
||||
t = std::max(0.7f, t);
|
||||
}
|
||||
|
||||
Vec3f current_pos = layers[pair.first].points[pair.second].position;
|
||||
Vec2f fitted_pos = curve.get_fitted_value(current_pos.z());
|
||||
|
||||
@ -1483,7 +1497,7 @@ void SeamPlacer::init(const Print &print, std::function<void(void)> throw_if_can
|
||||
for (size_t layer_idx = r.begin(); layer_idx < r.end(); ++layer_idx) {
|
||||
std::vector<SeamCandidate> &layer_perimeter_points = layers[layer_idx].points;
|
||||
for (size_t current = 0; current < layer_perimeter_points.size();
|
||||
current = layer_perimeter_points[current].perimeter.end_index + 1)
|
||||
current = layer_perimeter_points[current].perimeter.end_index)
|
||||
if (configured_seam_preference == spRandom)
|
||||
pick_random_seam_point(layer_perimeter_points, current);
|
||||
else
|
||||
@ -1557,12 +1571,12 @@ void SeamPlacer::place_seam(const Layer *layer, ExtrusionLoop &loop, bool extern
|
||||
// the internal seam into the concave corner, and not on the perpendicular projection on the closest edge (which is what the split_at function does)
|
||||
size_t index_of_prev =
|
||||
seam_index == perimeter_point.perimeter.start_index ?
|
||||
perimeter_point.perimeter.end_index :
|
||||
perimeter_point.perimeter.end_index - 1 :
|
||||
seam_index - 1;
|
||||
size_t index_of_next =
|
||||
seam_index == perimeter_point.perimeter.end_index ?
|
||||
perimeter_point.perimeter.start_index :
|
||||
seam_index + 1;
|
||||
seam_index == perimeter_point.perimeter.end_index - 1 ?
|
||||
perimeter_point.perimeter.start_index :
|
||||
seam_index + 1;
|
||||
|
||||
Vec2f dir_to_middle =
|
||||
((perimeter_point.position - layer_perimeters.points[index_of_prev].position).head<2>().normalized()
|
||||
|
@ -39,16 +39,16 @@ enum class EnforcedBlockedSeamPoint {
|
||||
|
||||
// struct representing single perimeter loop
|
||||
struct Perimeter {
|
||||
size_t start_index;
|
||||
size_t end_index; //inclusive!
|
||||
size_t seam_index;
|
||||
float flow_width;
|
||||
size_t start_index{};
|
||||
size_t end_index{}; //inclusive!
|
||||
size_t seam_index{};
|
||||
float flow_width{};
|
||||
|
||||
// During alignment, a final position may be stored here. In that case, finalized is set to true.
|
||||
// Note that final seam position is not limited to points of the perimeter loop. In theory it can be any position
|
||||
// Random position also uses this flexibility to set final seam point position
|
||||
bool finalized = false;
|
||||
Vec3f final_seam_position;
|
||||
Vec3f final_seam_position = Vec3f::Zero();
|
||||
};
|
||||
|
||||
//Struct over which all processing of perimeters is done. For each perimeter point, its respective candidate is created,
|
||||
@ -115,11 +115,10 @@ public:
|
||||
|
||||
// arm length used during angles computation
|
||||
static constexpr float polygon_local_angles_arm_distance = 0.3f;
|
||||
// value for angles with penalty lower than this threshold - such angles will be snapped to their original position instead of spline interpolated position
|
||||
static constexpr float sharp_angle_penalty_snapping_threshold = 0.6f;
|
||||
|
||||
// max tolerable distance from the previous layer is overhang_distance_tolerance_factor * flow_width
|
||||
static constexpr float overhang_distance_tolerance_factor = 0.5f;
|
||||
// snapping angle - angles larger than this value will be snapped to during seam painting
|
||||
static constexpr float sharp_angle_snapping_threshold = 55.0f * float(PI) / 180.0f;
|
||||
// overhang angle for seam placement that still yields good results, in degrees, measured from vertical direction
|
||||
static constexpr float overhang_angle_threshold = 45.0f * float(PI) / 180.0f;
|
||||
|
||||
// determines angle importance compared to visibility ( neutral value is 1.0f. )
|
||||
static constexpr float angle_importance_aligned = 0.6f;
|
||||
@ -131,8 +130,8 @@ public:
|
||||
// When searching for seam clusters for alignment:
|
||||
// following value describes, how much worse score can point have and still be picked into seam cluster instead of original seam point on the same layer
|
||||
static constexpr float seam_align_score_tolerance = 0.3f;
|
||||
// seam_align_tolerable_dist - if next layer closest point is too far away, break aligned string
|
||||
static constexpr float seam_align_tolerable_dist = 1.0f;
|
||||
// seam_align_tolerable_dist_factor - how far to search for seam from current position, final dist is seam_align_tolerable_dist_factor * flow_width
|
||||
static constexpr float seam_align_tolerable_dist_factor = 4.0f;
|
||||
// minimum number of seams needed in cluster to make alignment happen
|
||||
static constexpr size_t seam_align_minimum_string_seams = 6;
|
||||
// millimeters covered by spline; determines number of splines for the given string
|
||||
@ -154,8 +153,7 @@ private:
|
||||
void align_seam_points(const PrintObject *po, const SeamPlacerImpl::SeamComparator &comparator);
|
||||
std::vector<std::pair<size_t, size_t>> find_seam_string(const PrintObject *po,
|
||||
std::pair<size_t, size_t> start_seam,
|
||||
const SeamPlacerImpl::SeamComparator &comparator,
|
||||
float& string_weight) const;
|
||||
const SeamPlacerImpl::SeamComparator &comparator) const;
|
||||
std::optional<std::pair<size_t, size_t>> find_next_seam_in_layer(
|
||||
const std::vector<PrintObjectSeamData::LayerSeams> &layers,
|
||||
const Vec3f& projected_position,
|
||||
|
@ -560,13 +560,13 @@ inline bool is_rotation_ninety_degrees(const Vec3d &rotation)
|
||||
return is_rotation_ninety_degrees(rotation.x()) && is_rotation_ninety_degrees(rotation.y()) && is_rotation_ninety_degrees(rotation.z());
|
||||
}
|
||||
|
||||
template <class T>
|
||||
std::pair<T, T> dir_to_spheric(const Vec<3, T> &n, T norm = 1.)
|
||||
template <class Tout = double, class Tin>
|
||||
std::pair<Tout, Tout> dir_to_spheric(const Vec<3, Tin> &n, Tout norm = 1.)
|
||||
{
|
||||
T z = n.z();
|
||||
T r = norm;
|
||||
T polar = std::acos(z / r);
|
||||
T azimuth = std::atan2(n(1), n(0));
|
||||
Tout z = n.z();
|
||||
Tout r = norm;
|
||||
Tout polar = std::acos(z / r);
|
||||
Tout azimuth = std::atan2(n(1), n(0));
|
||||
return {polar, azimuth};
|
||||
}
|
||||
|
||||
|
@ -175,6 +175,19 @@ PiecewiseFittedCurve<Dimension, NumberType, Kernel> fit_curve(
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
template<int Dimension, typename NumberType>
|
||||
PiecewiseFittedCurve<Dimension, NumberType, LinearKernel<NumberType>>
|
||||
fit_linear_spline(
|
||||
const std::vector<Vec<Dimension, NumberType>> &observations,
|
||||
std::vector<NumberType> observation_points,
|
||||
std::vector<NumberType> weights,
|
||||
size_t segments_count,
|
||||
size_t endpoints_level_of_freedom = 0) {
|
||||
return fit_curve<LinearKernel<NumberType>>(observations, observation_points, weights, segments_count,
|
||||
endpoints_level_of_freedom);
|
||||
}
|
||||
|
||||
template<int Dimension, typename NumberType>
|
||||
PiecewiseFittedCurve<Dimension, NumberType, CubicBSplineKernel<NumberType>>
|
||||
fit_cubic_bspline(
|
||||
|
100
src/libslic3r/Geometry/VoronoiUtilsCgal.cpp
Normal file
@ -0,0 +1,100 @@
|
||||
#include <CGAL/Exact_predicates_exact_constructions_kernel.h>
|
||||
#include <CGAL/Arr_segment_traits_2.h>
|
||||
#include <CGAL/Surface_sweep_2_algorithms.h>
|
||||
|
||||
#include "libslic3r/Geometry/Voronoi.hpp"
|
||||
|
||||
#include "VoronoiUtilsCgal.hpp"
|
||||
|
||||
using VD = Slic3r::Geometry::VoronoiDiagram;
|
||||
|
||||
namespace Slic3r::Geometry {
|
||||
|
||||
using CGAL_Point = CGAL::Exact_predicates_exact_constructions_kernel::Point_2;
|
||||
using CGAL_Segment = CGAL::Arr_segment_traits_2<CGAL::Exact_predicates_exact_constructions_kernel>::Curve_2;
|
||||
|
||||
inline static CGAL_Point to_cgal_point(const VD::vertex_type &pt) { return {pt.x(), pt.y()}; }
|
||||
|
||||
// FIXME Lukas H.: Also includes parabolic segments.
|
||||
bool VoronoiUtilsCgal::is_voronoi_diagram_planar_intersection(const VD &voronoi_diagram)
|
||||
{
|
||||
assert(std::all_of(voronoi_diagram.edges().cbegin(), voronoi_diagram.edges().cend(),
|
||||
[](const VD::edge_type &edge) { return edge.color() == 0; }));
|
||||
|
||||
std::vector<CGAL_Segment> segments;
|
||||
segments.reserve(voronoi_diagram.num_edges());
|
||||
|
||||
for (const VD::edge_type &edge : voronoi_diagram.edges()) {
|
||||
if (edge.color() != 0)
|
||||
continue;
|
||||
|
||||
if (edge.is_finite() && edge.is_linear()) {
|
||||
segments.emplace_back(to_cgal_point(*edge.vertex0()), to_cgal_point(*edge.vertex1()));
|
||||
edge.color(1);
|
||||
assert(edge.twin() != nullptr);
|
||||
edge.twin()->color(1);
|
||||
}
|
||||
}
|
||||
|
||||
for (const VD::edge_type &edge : voronoi_diagram.edges())
|
||||
edge.color(0);
|
||||
|
||||
std::vector<CGAL_Point> intersections_pt;
|
||||
CGAL::compute_intersection_points(segments.begin(), segments.end(), std::back_inserter(intersections_pt));
|
||||
return intersections_pt.empty();
|
||||
}
|
||||
|
||||
static bool check_if_three_vectors_are_ccw(const CGAL_Point &common_pt, const CGAL_Point &pt_1, const CGAL_Point &pt_2, const CGAL_Point &test_pt) {
|
||||
CGAL::Orientation orientation = CGAL::orientation(common_pt, pt_1, pt_2);
|
||||
if (orientation == CGAL::Orientation::COLLINEAR) {
|
||||
// The first two edges are collinear, so the third edge must be on the right side on the first of them.
|
||||
return CGAL::orientation(common_pt, pt_1, test_pt) == CGAL::Orientation::RIGHT_TURN;
|
||||
} else if (orientation == CGAL::Orientation::LEFT_TURN) {
|
||||
// CCW oriented angle between vectors (common_pt, pt1) and (common_pt, pt2) is bellow PI.
|
||||
// So we need to check if test_pt isn't between them.
|
||||
CGAL::Orientation orientation1 = CGAL::orientation(common_pt, pt_1, test_pt);
|
||||
CGAL::Orientation orientation2 = CGAL::orientation(common_pt, pt_2, test_pt);
|
||||
return (orientation1 != CGAL::Orientation::LEFT_TURN || orientation2 != CGAL::Orientation::RIGHT_TURN);
|
||||
} else {
|
||||
assert(orientation == CGAL::Orientation::RIGHT_TURN);
|
||||
// CCW oriented angle between vectors (common_pt, pt1) and (common_pt, pt2) is upper PI.
|
||||
// So we need to check if test_pt is between them.
|
||||
CGAL::Orientation orientation1 = CGAL::orientation(common_pt, pt_1, test_pt);
|
||||
CGAL::Orientation orientation2 = CGAL::orientation(common_pt, pt_2, test_pt);
|
||||
return (orientation1 == CGAL::Orientation::RIGHT_TURN || orientation2 == CGAL::Orientation::LEFT_TURN);
|
||||
}
|
||||
}
|
||||
|
||||
bool VoronoiUtilsCgal::is_voronoi_diagram_planar_angle(const VoronoiDiagram &voronoi_diagram)
|
||||
{
|
||||
for (const VD::vertex_type &vertex : voronoi_diagram.vertices()) {
|
||||
std::vector<const VD::edge_type *> edges;
|
||||
const VD::edge_type *edge = vertex.incident_edge();
|
||||
|
||||
do {
|
||||
// FIXME Lukas H.: Also process parabolic segments.
|
||||
if (edge->is_finite() && edge->is_linear())
|
||||
edges.emplace_back(edge);
|
||||
|
||||
edge = edge->rot_next();
|
||||
} while (edge != vertex.incident_edge());
|
||||
|
||||
// Checking for CCW make sense for three and more edges.
|
||||
if (edges.size() > 2) {
|
||||
for (auto edge_it = edges.begin() ; edge_it != edges.end(); ++edge_it) {
|
||||
const Geometry::VoronoiDiagram::edge_type *prev_edge = edge_it == edges.begin() ? edges.back() : *std::prev(edge_it);
|
||||
const Geometry::VoronoiDiagram::edge_type *curr_edge = *edge_it;
|
||||
const Geometry::VoronoiDiagram::edge_type *next_edge = std::next(edge_it) == edges.end() ? edges.front() : *std::next(edge_it);
|
||||
|
||||
if (!check_if_three_vectors_are_ccw(to_cgal_point(*prev_edge->vertex0()), to_cgal_point(*prev_edge->vertex1()),
|
||||
to_cgal_point(*curr_edge->vertex1()), to_cgal_point(*next_edge->vertex1())))
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
} // namespace Slic3r::Geometry
|
21
src/libslic3r/Geometry/VoronoiUtilsCgal.hpp
Normal file
@ -0,0 +1,21 @@
|
||||
#ifndef slic3r_VoronoiUtilsCgal_hpp_
|
||||
#define slic3r_VoronoiUtilsCgal_hpp_
|
||||
|
||||
#include "Voronoi.hpp"
|
||||
|
||||
namespace Slic3r::Geometry {
|
||||
class VoronoiDiagram;
|
||||
|
||||
class VoronoiUtilsCgal
|
||||
{
|
||||
public:
|
||||
// Check if the Voronoi diagram is planar using CGAL sweeping edge algorithm for enumerating all intersections between lines.
|
||||
static bool is_voronoi_diagram_planar_intersection(const VoronoiDiagram &voronoi_diagram);
|
||||
|
||||
// Check if the Voronoi diagram is planar using verification that all neighboring edges are ordered CCW for each vertex.
|
||||
static bool is_voronoi_diagram_planar_angle(const VoronoiDiagram &voronoi_diagram);
|
||||
|
||||
};
|
||||
} // namespace Slic3r::Geometry
|
||||
|
||||
#endif // slic3r_VoronoiUtilsCgal_hpp_
|
@ -274,7 +274,7 @@ void CGALMeshDeleter::operator()(CGALMesh *ptr) { delete ptr; }
|
||||
|
||||
bool does_bound_a_volume(const CGALMesh &mesh)
|
||||
{
|
||||
return CGALProc::does_bound_a_volume(mesh.m);
|
||||
return CGAL::is_closed(mesh.m) && CGALProc::does_bound_a_volume(mesh.m);
|
||||
}
|
||||
|
||||
bool empty(const CGALMesh &mesh)
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include "Format/OBJ.hpp"
|
||||
#include "Format/STL.hpp"
|
||||
#include "Format/3mf.hpp"
|
||||
#include "Format/STEP.hpp"
|
||||
|
||||
#include <float.h>
|
||||
|
||||
@ -114,13 +115,15 @@ Model Model::read_from_file(const std::string& input_file, DynamicPrintConfig* c
|
||||
result = load_stl(input_file.c_str(), &model);
|
||||
else if (boost::algorithm::iends_with(input_file, ".obj"))
|
||||
result = load_obj(input_file.c_str(), &model);
|
||||
else if (boost::algorithm::iends_with(input_file, ".step") || boost::algorithm::iends_with(input_file, ".stp"))
|
||||
result = load_step(input_file.c_str(), &model);
|
||||
else if (boost::algorithm::iends_with(input_file, ".amf") || boost::algorithm::iends_with(input_file, ".amf.xml"))
|
||||
result = load_amf(input_file.c_str(), config, config_substitutions, &model, options & LoadAttribute::CheckVersion);
|
||||
else if (boost::algorithm::iends_with(input_file, ".3mf"))
|
||||
//FIXME options & LoadAttribute::CheckVersion ?
|
||||
result = load_3mf(input_file.c_str(), *config, *config_substitutions, &model, false);
|
||||
else
|
||||
throw Slic3r::RuntimeError("Unknown file format. Input file must have .stl, .obj, .amf(.xml) or .prusa extension.");
|
||||
throw Slic3r::RuntimeError("Unknown file format. Input file must have .stl, .obj, .amf(.xml), .prusa or .step/.stp extension.");
|
||||
|
||||
if (! result)
|
||||
throw Slic3r::RuntimeError("Loading of a model file failed.");
|
||||
|
@ -10,6 +10,14 @@
|
||||
#include <cmath>
|
||||
#include <cassert>
|
||||
#include <stack>
|
||||
#include <unordered_map>
|
||||
|
||||
//#define ARACHNE_DEBUG
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
#include "SVG.hpp"
|
||||
#include "Utils.hpp"
|
||||
#endif
|
||||
|
||||
namespace Slic3r {
|
||||
|
||||
@ -24,7 +32,18 @@ ExtrusionPaths thick_polyline_to_extrusion_paths(const ThickPolyline &thick_poly
|
||||
assert(line.a_width >= SCALED_EPSILON && line.b_width >= SCALED_EPSILON);
|
||||
|
||||
const coordf_t line_len = line.length();
|
||||
if (line_len < SCALED_EPSILON) continue;
|
||||
if (line_len < SCALED_EPSILON) {
|
||||
// The line is so tiny that we don't care about its width when we connect it to another line.
|
||||
if (!path.empty())
|
||||
path.polyline.points.back() = line.b; // If the variable path is non-empty, connect this tiny line to it.
|
||||
else if (i + 1 < (int)lines.size()) // If there is at least one following line, connect this tiny line to it.
|
||||
lines[i + 1].a = line.a;
|
||||
else if (!paths.empty())
|
||||
paths.back().polyline.points.back() = line.b; // Connect this tiny line to the last finished path.
|
||||
|
||||
// If any of the above isn't satisfied, then remove this tiny line.
|
||||
continue;
|
||||
}
|
||||
|
||||
double thickness_delta = fabs(line.a_width - line.b_width);
|
||||
if (thickness_delta > tolerance) {
|
||||
@ -100,14 +119,19 @@ static void variable_width(const ThickPolylines& polylines, ExtrusionRole role,
|
||||
// This value determines granularity of adaptive width, as G-code does not allow
|
||||
// variable extrusion within a single move; this value shall only affect the amount
|
||||
// of segments, and any pruning shall be performed before we apply this tolerance.
|
||||
const float tolerance = float(scale_(0.05));
|
||||
const auto tolerance = float(scale_(0.05));
|
||||
for (const ThickPolyline &p : polylines) {
|
||||
ExtrusionPaths paths = thick_polyline_to_extrusion_paths(p, role, flow, tolerance, tolerance);
|
||||
// Append paths to collection.
|
||||
if (! paths.empty()) {
|
||||
if (paths.front().first_point() == paths.back().last_point())
|
||||
out.emplace_back(new ExtrusionLoop(std::move(paths)));
|
||||
else {
|
||||
if (!paths.empty()) {
|
||||
for (auto it = std::next(paths.begin()); it != paths.end(); ++it) {
|
||||
assert(it->polyline.points.size() >= 2);
|
||||
assert(std::prev(it)->polyline.last_point() == it->polyline.first_point());
|
||||
}
|
||||
|
||||
if (paths.front().first_point() == paths.back().last_point()) {
|
||||
out.emplace_back(new ExtrusionLoop(std::move(paths)));
|
||||
} else {
|
||||
for (ExtrusionPath &path : paths)
|
||||
out.emplace_back(new ExtrusionPath(std::move(path)));
|
||||
}
|
||||
@ -462,8 +486,40 @@ static ExtrusionEntityCollection traverse_extrusions(const PerimeterGenerator &p
|
||||
// Reapply the nearest point search for starting point.
|
||||
// We allow polyline reversal because Clipper may have randomly reversed polylines during clipping.
|
||||
// Arachne sometimes creates extrusion with zero-length (just two same endpoints);
|
||||
if (!paths.empty())
|
||||
chain_and_reorder_extrusion_paths(paths, &paths.front().first_point());
|
||||
if (!paths.empty()) {
|
||||
Point start_point = paths.front().first_point();
|
||||
if (!extrusion->is_closed) {
|
||||
// Especially for open extrusion, we need to select a starting point that is at the start
|
||||
// or the end of the extrusions to make one continuous line. Also, we prefer a non-overhang
|
||||
// starting point.
|
||||
struct PointInfo
|
||||
{
|
||||
size_t occurrence = 0;
|
||||
bool is_overhang = false;
|
||||
};
|
||||
std::unordered_map<Point, PointInfo, PointHash> point_occurrence;
|
||||
for (const ExtrusionPath &path : paths) {
|
||||
++point_occurrence[path.polyline.first_point()].occurrence;
|
||||
++point_occurrence[path.polyline.last_point()].occurrence;
|
||||
if (path.role() == erOverhangPerimeter) {
|
||||
point_occurrence[path.polyline.first_point()].is_overhang = true;
|
||||
point_occurrence[path.polyline.last_point()].is_overhang = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Prefer non-overhang point as a starting point.
|
||||
for (const std::pair<Point, PointInfo> pt : point_occurrence)
|
||||
if (pt.second.occurrence == 1) {
|
||||
start_point = pt.first;
|
||||
if (!pt.second.is_overhang) {
|
||||
start_point = pt.first;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
chain_and_reorder_extrusion_paths(paths, &start_point);
|
||||
}
|
||||
} else {
|
||||
extrusion_paths_append(paths, *extrusion, role, is_external ? perimeter_generator.ext_perimeter_flow : perimeter_generator.perimeter_flow);
|
||||
}
|
||||
@ -488,6 +544,27 @@ static ExtrusionEntityCollection traverse_extrusions(const PerimeterGenerator &p
|
||||
return extrusion_coll;
|
||||
}
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
static void export_perimeters_to_svg(const std::string &path, const Polygons &contours, const std::vector<Arachne::VariableWidthLines> &perimeters, const ExPolygons &infill_area)
|
||||
{
|
||||
coordf_t stroke_width = scale_(0.03);
|
||||
BoundingBox bbox = get_extents(contours);
|
||||
bbox.offset(scale_(1.));
|
||||
::Slic3r::SVG svg(path.c_str(), bbox);
|
||||
|
||||
svg.draw(infill_area, "cyan");
|
||||
|
||||
for (const Arachne::VariableWidthLines &perimeter : perimeters)
|
||||
for (const Arachne::ExtrusionLine &extrusion_line : perimeter) {
|
||||
ThickPolyline thick_polyline = to_thick_polyline(extrusion_line);
|
||||
svg.draw({thick_polyline}, "green", "blue", stroke_width);
|
||||
}
|
||||
|
||||
for (const Line &line : to_lines(contours))
|
||||
svg.draw(line, "red", stroke_width);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Thanks, Cura developers, for implementing an algorithm for generating perimeters with variable width (Arachne) that is based on the paper
|
||||
// "A framework for adaptive width control of dense contour-parallel toolpaths in fused deposition modeling"
|
||||
void PerimeterGenerator::process_arachne()
|
||||
@ -525,10 +602,28 @@ void PerimeterGenerator::process_arachne()
|
||||
ExPolygons last = offset_ex(surface.expolygon.simplify_p(m_scaled_resolution), - float(ext_perimeter_width / 2. - ext_perimeter_spacing / 2.));
|
||||
Polygons last_p = to_polygons(last);
|
||||
|
||||
Arachne::WallToolPaths wallToolPaths(last_p, ext_perimeter_spacing, perimeter_spacing, coord_t(loop_number + 1), 0, *this->object_config, *this->print_config);
|
||||
Arachne::WallToolPaths wallToolPaths(last_p, ext_perimeter_spacing, perimeter_spacing, coord_t(loop_number + 1), 0, layer_height, *this->object_config, *this->print_config);
|
||||
std::vector<Arachne::VariableWidthLines> perimeters = wallToolPaths.getToolPaths();
|
||||
loop_number = int(perimeters.size()) - 1;
|
||||
|
||||
#ifdef ARACHNE_DEBUG
|
||||
{
|
||||
static int iRun = 0;
|
||||
export_perimeters_to_svg(debug_out_path("arachne-perimeters-%d-%d.svg", layer_id, iRun++), to_polygons(last), perimeters, union_ex(wallToolPaths.getInnerContour()));
|
||||
}
|
||||
#endif
|
||||
|
||||
// All closed ExtrusionLine should have the same the first and the last point.
|
||||
// But in rare cases, Arachne produce ExtrusionLine marked as closed but without
|
||||
// equal the first and the last point.
|
||||
assert([&perimeters = std::as_const(perimeters)]() -> bool {
|
||||
for (const Arachne::VariableWidthLines &perimeter : perimeters)
|
||||
for (const Arachne::ExtrusionLine &el : perimeter)
|
||||
if (el.is_closed && el.junctions.front().p != el.junctions.back().p)
|
||||
return false;
|
||||
return true;
|
||||
}());
|
||||
|
||||
int start_perimeter = int(perimeters.size()) - 1;
|
||||
int end_perimeter = -1;
|
||||
int direction = -1;
|
||||
|
@ -448,7 +448,7 @@ static std::vector<std::string> s_Preset_print_options {
|
||||
"wipe_tower_width", "wipe_tower_rotation_angle", "wipe_tower_brim_width", "wipe_tower_bridging", "single_extruder_multi_material_priming", "mmu_segmented_region_max_width",
|
||||
"wipe_tower_no_sparse_layers", "compatible_printers", "compatible_printers_condition", "inherits",
|
||||
"perimeter_generator", "wall_transition_length", "wall_transition_filter_deviation", "wall_transition_angle",
|
||||
"wall_distribution_count", "wall_split_middle_threshold", "wall_add_middle_threshold", "min_feature_size", "min_bead_width"
|
||||
"wall_distribution_count", "min_feature_size", "min_bead_width"
|
||||
};
|
||||
|
||||
static std::vector<std::string> s_Preset_filament_options {
|
||||
@ -493,6 +493,7 @@ static std::vector<std::string> s_Preset_sla_print_options {
|
||||
"layer_height",
|
||||
"faded_layers",
|
||||
"supports_enable",
|
||||
"support_tree_type",
|
||||
"support_head_front_diameter",
|
||||
"support_head_penetration",
|
||||
"support_head_width",
|
||||
|
@ -453,10 +453,8 @@ void PresetBundle::save_changes_for_preset(const std::string& new_name, Preset::
|
||||
presets.get_edited_preset().config.apply_only(presets.get_selected_preset().config, unselected_options);
|
||||
}
|
||||
|
||||
#if ENABLE_COPY_CUSTOM_BED_MODEL_AND_TEXTURE
|
||||
if (type == Preset::TYPE_PRINTER)
|
||||
copy_bed_model_and_texture_if_needed(presets.get_edited_preset().config);
|
||||
#endif // ENABLE_COPY_CUSTOM_BED_MODEL_AND_TEXTURE
|
||||
|
||||
// Save the preset into Slic3r::data_dir / presets / section_name / preset_name.ini
|
||||
presets.save_current_preset(new_name);
|
||||
@ -1865,7 +1863,6 @@ void PresetBundle::set_default_suppressed(bool default_suppressed)
|
||||
printers.set_default_suppressed(default_suppressed);
|
||||
}
|
||||
|
||||
#if ENABLE_COPY_CUSTOM_BED_MODEL_AND_TEXTURE
|
||||
void copy_bed_model_and_texture_if_needed(DynamicPrintConfig& config)
|
||||
{
|
||||
const boost::filesystem::path user_dir = boost::filesystem::absolute(boost::filesystem::path(data_dir()) / "printer").make_preferred();
|
||||
@ -1891,6 +1888,5 @@ void copy_bed_model_and_texture_if_needed(DynamicPrintConfig& config)
|
||||
do_copy(config.option<ConfigOptionString>("bed_custom_texture"), "texture");
|
||||
do_copy(config.option<ConfigOptionString>("bed_custom_model"), "model");
|
||||
}
|
||||
#endif // ENABLE_COPY_CUSTOM_BED_MODEL_AND_TEXTURE
|
||||
|
||||
} // namespace Slic3r
|
||||
|
@ -178,11 +178,9 @@ private:
|
||||
|
||||
ENABLE_ENUM_BITMASK_OPERATORS(PresetBundle::LoadConfigBundleAttribute)
|
||||
|
||||
#if ENABLE_COPY_CUSTOM_BED_MODEL_AND_TEXTURE
|
||||
// Copies bed texture and model files to 'data_dir()\printer' folder, if needed
|
||||
// and updates the config accordingly
|
||||
extern void copy_bed_model_and_texture_if_needed(DynamicPrintConfig& config);
|
||||
#endif // ENABLE_COPY_CUSTOM_BED_MODEL_AND_TEXTURE
|
||||
|
||||
} // namespace Slic3r
|
||||
|
||||
|
@ -124,7 +124,7 @@ public:
|
||||
T * const * begin() const { return m_data->data(); }
|
||||
T * const * end() const { return m_data->data() + m_data->size(); }
|
||||
const T* front() const { return m_data->front(); }
|
||||
const T* back() const { return m_data->front(); }
|
||||
const T* back() const { return m_data->back(); }
|
||||
size_t size() const { return m_data->size(); }
|
||||
bool empty() const { return m_data->empty(); }
|
||||
const T* operator[](size_t i) const { return (*m_data)[i]; }
|
||||
|
@ -176,6 +176,12 @@ static const t_config_enum_values s_keys_map_SLAMaterialSpeed = {
|
||||
};
|
||||
CONFIG_OPTION_ENUM_DEFINE_STATIC_MAPS(SLAMaterialSpeed);
|
||||
|
||||
static inline const t_config_enum_values s_keys_map_SLASupportTreeType = {
|
||||
{"default", int(sla::SupportTreeType::Default)},
|
||||
{"branching", int(sla::SupportTreeType::Branching)}
|
||||
};
|
||||
CONFIG_OPTION_ENUM_DEFINE_STATIC_MAPS(SLASupportTreeType);
|
||||
|
||||
static const t_config_enum_values s_keys_map_BrimType = {
|
||||
{"no_brim", btNoBrim},
|
||||
{"outer_only", btOuterOnly},
|
||||
@ -3069,7 +3075,8 @@ void PrintConfigDef::init_fff_params()
|
||||
def->category = L("Layers and Perimeters");
|
||||
def->tooltip = L("Classic perimeter generator produces perimeters with constant extrusion width and for "
|
||||
"very thin areas is used gap-fill. "
|
||||
"Arachne engine produces perimeters with variable extrusion width.");
|
||||
"Arachne engine produces perimeters with variable extrusion width. "
|
||||
"This setting also affects the Concentric infill.");
|
||||
def->enum_keys_map = &ConfigOptionEnum<PerimeterGeneratorType>::get_enum_values();
|
||||
def->enum_values.push_back("classic");
|
||||
def->enum_values.push_back("arachne");
|
||||
@ -3078,15 +3085,16 @@ void PrintConfigDef::init_fff_params()
|
||||
def->mode = comAdvanced;
|
||||
def->set_default_value(new ConfigOptionEnum<PerimeterGeneratorType>(PerimeterGeneratorType::Arachne));
|
||||
|
||||
def = this->add("wall_transition_length", coFloat);
|
||||
def = this->add("wall_transition_length", coFloatOrPercent);
|
||||
def->label = L("Perimeter transition length");
|
||||
def->category = L("Advanced");
|
||||
def->tooltip = L("When transitioning between different numbers of perimeters as the part becomes "
|
||||
"thinner, a certain amount of space is allotted to split or join the perimeter segments.");
|
||||
def->sidetext = L("mm");
|
||||
"thinner, a certain amount of space is allotted to split or join the perimeter segments. "
|
||||
"If expressed as a percentage (for example 100%), it will be computed based on the nozzle diameter.");
|
||||
def->sidetext = L("mm or %");
|
||||
def->mode = comExpert;
|
||||
def->min = 0;
|
||||
def->set_default_value(new ConfigOptionFloat(0.4));
|
||||
def->set_default_value(new ConfigOptionFloatOrPercent(100, true));
|
||||
|
||||
def = this->add("wall_transition_filter_deviation", coFloatOrPercent);
|
||||
def->label = L("Perimeter transitioning filter margin");
|
||||
@ -3125,46 +3133,17 @@ void PrintConfigDef::init_fff_params()
|
||||
def->min = 1;
|
||||
def->set_default_value(new ConfigOptionInt(1));
|
||||
|
||||
def = this->add("wall_split_middle_threshold", coPercent);
|
||||
def->label = L("Split middle perimeter threshold");
|
||||
def->category = L("Advanced");
|
||||
def->tooltip = L("The smallest extrusion width, as a factor of the normal extrusion width, above which the middle "
|
||||
"perimeter (if there is one) will be split into two. Reduce this setting to use more, thinner "
|
||||
"perimeters. Increase to use fewer, wider perimeters. Note that this applies -as if- the entire "
|
||||
"shape should be filled with perimeter, so the middle here refers to the middle of the object "
|
||||
"between two outer edges of the shape, even if there actually is infill or other extrusion types in "
|
||||
"the print instead of the perimeter.");
|
||||
def->sidetext = L("%");
|
||||
def->mode = comAdvanced;
|
||||
def->min = 1;
|
||||
def->max = 99;
|
||||
def->set_default_value(new ConfigOptionPercent(50));
|
||||
|
||||
def = this->add("wall_add_middle_threshold", coPercent);
|
||||
def->label = L("Add middle perimeter threshold");
|
||||
def->category = L("Advanced");
|
||||
def->tooltip = L("The smallest extrusion width, as a factor of the normal extrusion width, above which a middle "
|
||||
"perimeter (if there wasn't one already) will be added. Reduce this setting to use more, "
|
||||
"thinner perimeters. Increase to use fewer, wider perimeters. Note that this applies -as if- the "
|
||||
"entire shape should be filled with perimeter, so the middle here refers to the middle of the "
|
||||
"object between two outer edges of the shape, even if there actually is infill or other "
|
||||
"extrusion types in the print instead of the perimeter.");
|
||||
def->sidetext = L("%");
|
||||
def->mode = comAdvanced;
|
||||
def->min = 1;
|
||||
def->max = 99;
|
||||
def->set_default_value(new ConfigOptionPercent(75));
|
||||
|
||||
def = this->add("min_feature_size", coFloat);
|
||||
def = this->add("min_feature_size", coFloatOrPercent);
|
||||
def->label = L("Minimum feature size");
|
||||
def->category = L("Advanced");
|
||||
def->tooltip = L("Minimum thickness of thin features. Model features that are thinner than this value will "
|
||||
"not be printed, while features thicker than the Minimum feature size will be widened to "
|
||||
"the Minimum perimeter width.");
|
||||
def->sidetext = L("mm");
|
||||
"the Minimum perimeter width. "
|
||||
"If expressed as a percentage (for example 25%), it will be computed based on the nozzle diameter.");
|
||||
def->sidetext = L("mm or %");
|
||||
def->mode = comExpert;
|
||||
def->min = 0;
|
||||
def->set_default_value(new ConfigOptionFloat(0.1));
|
||||
def->set_default_value(new ConfigOptionFloatOrPercent(25, true));
|
||||
|
||||
def = this->add("min_bead_width", coFloatOrPercent);
|
||||
def->label = L("Minimum perimeter width");
|
||||
@ -3564,6 +3543,17 @@ void PrintConfigDef::init_sla_params()
|
||||
def->mode = comSimple;
|
||||
def->set_default_value(new ConfigOptionBool(true));
|
||||
|
||||
def = this->add("support_tree_type", coEnum);
|
||||
def->label = L("Support tree type");
|
||||
def->tooltip = L("Support tree building strategy");
|
||||
def->enum_keys_map = &ConfigOptionEnum<sla::SupportTreeType>::get_enum_values();
|
||||
def->enum_values = ConfigOptionEnum<sla::SupportTreeType>::get_enum_names();
|
||||
def->enum_labels = ConfigOptionEnum<sla::SupportTreeType>::get_enum_names();
|
||||
def->enum_labels[0] = L("Default");
|
||||
def->enum_labels[1] = L("Branching");
|
||||
def->mode = comAdvanced;
|
||||
def->set_default_value(new ConfigOptionEnum(sla::SupportTreeType::Default));
|
||||
|
||||
def = this->add("support_head_front_diameter", coFloat);
|
||||
def->label = L("Pinhead front diameter");
|
||||
def->category = L("Supports");
|
||||
@ -3649,13 +3639,17 @@ void PrintConfigDef::init_sla_params()
|
||||
def = this->add("support_pillar_widening_factor", coFloat);
|
||||
def->label = L("Pillar widening factor");
|
||||
def->category = L("Supports");
|
||||
def->tooltip = L("Merging bridges or pillars into another pillars can "
|
||||
"increase the radius. Zero means no increase, one means "
|
||||
"full increase.");
|
||||
def->tooltip = L(
|
||||
"Merging bridges or pillars into another pillars can "
|
||||
"increase the radius. Zero means no increase, one means "
|
||||
"full increase. The exact amount of increase is unspecified and can "
|
||||
"change in the future. What is garanteed is that thickness will not "
|
||||
"exceed \"support_base_diameter\"");
|
||||
|
||||
def->min = 0;
|
||||
def->max = 1;
|
||||
def->mode = comExpert;
|
||||
def->set_default_value(new ConfigOptionFloat(0.0));
|
||||
def->set_default_value(new ConfigOptionFloat(0.15));
|
||||
|
||||
def = this->add("support_base_diameter", coFloat);
|
||||
def->label = L("Support base diameter");
|
||||
@ -4027,6 +4021,8 @@ void PrintConfigDef::handle_legacy(t_config_option_key &opt_key, std::string &va
|
||||
"serial_port", "serial_speed",
|
||||
// Introduced in some PrusaSlicer 2.3.1 alpha, later renamed or removed.
|
||||
"fuzzy_skin_perimeter_mode", "fuzzy_skin_shape",
|
||||
// Introduced in PrusaSlicer 2.3.0-alpha2, later replaced by automatic calculation based on extrusion width.
|
||||
"wall_add_middle_threshold", "wall_split_middle_threshold",
|
||||
};
|
||||
|
||||
// In PrusaSlicer 2.3.0-alpha0 the "monotonous" infill was introduced, which was later renamed to "monotonic".
|
||||
|
@ -155,6 +155,7 @@ CONFIG_OPTION_ENUM_DECLARE_STATIC_MAPS(SupportMaterialInterfacePattern)
|
||||
CONFIG_OPTION_ENUM_DECLARE_STATIC_MAPS(SeamPosition)
|
||||
CONFIG_OPTION_ENUM_DECLARE_STATIC_MAPS(SLADisplayOrientation)
|
||||
CONFIG_OPTION_ENUM_DECLARE_STATIC_MAPS(SLAPillarConnectionMode)
|
||||
CONFIG_OPTION_ENUM_DECLARE_STATIC_MAPS(SLASupportTreeType)
|
||||
CONFIG_OPTION_ENUM_DECLARE_STATIC_MAPS(BrimType)
|
||||
CONFIG_OPTION_ENUM_DECLARE_STATIC_MAPS(DraftShield)
|
||||
CONFIG_OPTION_ENUM_DECLARE_STATIC_MAPS(GCodeThumbnailsFormat)
|
||||
@ -507,13 +508,11 @@ PRINT_CONFIG_CLASS_DEFINE(
|
||||
((ConfigOptionFloat, slice_closing_radius))
|
||||
((ConfigOptionEnum<SlicingMode>, slicing_mode))
|
||||
((ConfigOptionEnum<PerimeterGeneratorType>, perimeter_generator))
|
||||
((ConfigOptionFloat, wall_transition_length))
|
||||
((ConfigOptionFloatOrPercent, wall_transition_length))
|
||||
((ConfigOptionFloatOrPercent, wall_transition_filter_deviation))
|
||||
((ConfigOptionFloat, wall_transition_angle))
|
||||
((ConfigOptionInt, wall_distribution_count))
|
||||
((ConfigOptionPercent, wall_split_middle_threshold))
|
||||
((ConfigOptionPercent, wall_add_middle_threshold))
|
||||
((ConfigOptionFloat, min_feature_size))
|
||||
((ConfigOptionFloatOrPercent, min_feature_size))
|
||||
((ConfigOptionFloatOrPercent, min_bead_width))
|
||||
((ConfigOptionBool, support_material))
|
||||
// Automatic supports (generated based on support_material_threshold).
|
||||
@ -829,6 +828,8 @@ PRINT_CONFIG_CLASS_DEFINE(
|
||||
// Enabling or disabling support creation
|
||||
((ConfigOptionBool, supports_enable))
|
||||
|
||||
((ConfigOptionEnum<sla::SupportTreeType>, support_tree_type))
|
||||
|
||||
// Diameter in mm of the pointing side of the head.
|
||||
((ConfigOptionFloat, support_head_front_diameter))/*= 0.2*/
|
||||
|
||||
|
@ -673,8 +673,6 @@ bool PrintObject::invalidate_state_by_config_options(
|
||||
|| opt_key == "wall_transition_filter_deviation"
|
||||
|| opt_key == "wall_transition_angle"
|
||||
|| opt_key == "wall_distribution_count"
|
||||
|| opt_key == "wall_split_middle_threshold"
|
||||
|| opt_key == "wall_add_middle_threshold"
|
||||
|| opt_key == "min_feature_size"
|
||||
|| opt_key == "min_bead_width") {
|
||||
steps.emplace_back(posSlice);
|
||||
|
257
src/libslic3r/SLA/BranchingTreeSLA.cpp
Normal file
@ -0,0 +1,257 @@
|
||||
#include "BranchingTreeSLA.hpp"
|
||||
|
||||
#include "libslic3r/Execution/ExecutionTBB.hpp"
|
||||
|
||||
#include "libslic3r/KDTreeIndirect.hpp"
|
||||
|
||||
#include "SupportTreeUtils.hpp"
|
||||
#include "BranchingTree/PointCloud.hpp"
|
||||
|
||||
#include "Pad.hpp"
|
||||
|
||||
#include <map>
|
||||
|
||||
namespace Slic3r { namespace sla {
|
||||
|
||||
class BranchingTreeBuilder: public branchingtree::Builder {
|
||||
SupportTreeBuilder &m_builder;
|
||||
const SupportableMesh &m_sm;
|
||||
const branchingtree::PointCloud &m_cloud;
|
||||
|
||||
// Scaling of the input value 'widening_factor:<0, 1>' to produce resonable
|
||||
// widening behaviour
|
||||
static constexpr double WIDENING_SCALE = 0.02;
|
||||
|
||||
double get_radius(const branchingtree::Node &j)
|
||||
{
|
||||
double w = WIDENING_SCALE * m_sm.cfg.pillar_widening_factor * j.weight;
|
||||
|
||||
return std::min(m_sm.cfg.base_radius_mm, double(j.Rmin) + w);
|
||||
}
|
||||
|
||||
std::vector<size_t> m_unroutable_pinheads;
|
||||
|
||||
void build_subtree(size_t root)
|
||||
{
|
||||
traverse(m_cloud, root, [this](const branchingtree::Node &node) {
|
||||
if (node.left >= 0 && node.right >= 0) {
|
||||
auto nparent = m_cloud.get(node.id);
|
||||
auto nleft = m_cloud.get(node.left);
|
||||
auto nright = m_cloud.get(node.right);
|
||||
Vec3d from1d = nleft.pos.cast<double>();
|
||||
Vec3d from2d = nright.pos.cast<double>();
|
||||
Vec3d tod = nparent.pos.cast<double>();
|
||||
double mergeR = get_radius(nparent);
|
||||
double leftR = get_radius(nleft);
|
||||
double rightR = get_radius(nright);
|
||||
|
||||
m_builder.add_diffbridge(from1d, tod, leftR, mergeR);
|
||||
m_builder.add_diffbridge(from2d, tod, rightR, mergeR);
|
||||
m_builder.add_junction(tod, mergeR);
|
||||
} else if (int child = node.left + node.right + 1; child >= 0) {
|
||||
auto from = m_cloud.get(child);
|
||||
auto to = m_cloud.get(node.id);
|
||||
auto tod = to.pos.cast<double>();
|
||||
double toR = get_radius(to);
|
||||
m_builder.add_diffbridge(from.pos.cast<double>(),
|
||||
tod,
|
||||
get_radius(from),
|
||||
toR);
|
||||
m_builder.add_junction(tod, toR);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void discard_subtree(size_t root)
|
||||
{
|
||||
// Discard all the support points connecting to this branch.
|
||||
traverse(m_cloud, root, [this](const branchingtree::Node &node) {
|
||||
int suppid_parent = m_cloud.get_leaf_id(node.id);
|
||||
int suppid_left = m_cloud.get_leaf_id(node.left);
|
||||
int suppid_right = m_cloud.get_leaf_id(node.right);
|
||||
if (suppid_parent >= 0)
|
||||
m_unroutable_pinheads.emplace_back(suppid_parent);
|
||||
if (suppid_left >= 0)
|
||||
m_unroutable_pinheads.emplace_back(suppid_left);
|
||||
if (suppid_right >= 0)
|
||||
m_unroutable_pinheads.emplace_back(suppid_right);
|
||||
});
|
||||
}
|
||||
|
||||
public:
    BranchingTreeBuilder(SupportTreeBuilder &builder,
                         const SupportableMesh &sm,
                         const branchingtree::PointCloud &cloud)
        : m_builder{builder}, m_sm{sm}, m_cloud{cloud}
    {}

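    // Callbacks invoked by branchingtree::build_tree() while it routes the
    // support point cloud; each one reports whether the proposed connection
    // is feasible, and the ground/mesh variants also commit the routed
    // subtree to the SupportTreeBuilder.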
    bool add_bridge(const branchingtree::Node &from,
                    const branchingtree::Node &to) override;

    bool add_merger(const branchingtree::Node &node,
                    const branchingtree::Node &closest,
                    const branchingtree::Node &merge_node) override;

    bool add_ground_bridge(const branchingtree::Node &from,
                           const branchingtree::Node &/*to*/) override;

    bool add_mesh_bridge(const branchingtree::Node &from,
                         const branchingtree::Node &to) override;

    void report_unroutable(const branchingtree::Node &j) override
    {
        BOOST_LOG_TRIVIAL(error) << "Cannot route junction at " << j.pos.x()
                                 << " " << j.pos.y() << " " << j.pos.z();

        // Discard all the support points connecting to this branch.
        discard_subtree(j.id);
    }

    const std::vector<size_t>& unroutable_pinheads() const
    {
        return m_unroutable_pinheads;
    }

    bool is_valid() const override { return !m_builder.ctl().stopcondition(); }
};

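// A bridge between two nodes is accepted only if the connecting beam (a
// capsule spanned by the two node spheres) reaches 'to' without hitting the
// mesh within the configured safety distance.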
bool BranchingTreeBuilder::add_bridge(const branchingtree::Node &from,
                                      const branchingtree::Node &to)
{
    Vec3d fromd = from.pos.cast<double>(), tod = to.pos.cast<double>();
    double fromR = get_radius(from), toR = get_radius(to);
    Beam beam{Ball{fromd, fromR}, Ball{tod, toR}};
    auto hit = beam_mesh_hit(ex_tbb, m_sm.emesh, beam,
                             m_sm.cfg.safety_distance_mm);

    bool ret = hit.distance() > (tod - fromd).norm();

    return ret;
}

bool BranchingTreeBuilder::add_merger(const branchingtree::Node &node,
                                      const branchingtree::Node &closest,
                                      const branchingtree::Node &merge_node)
{
    Vec3d from1d = node.pos.cast<double>(),
          from2d = closest.pos.cast<double>(),
          tod    = merge_node.pos.cast<double>();

    double mergeR   = get_radius(merge_node);
    double nodeR    = get_radius(node);
    double closestR = get_radius(closest);
    Beam beam1{Ball{from1d, nodeR}, Ball{tod, mergeR}};
    Beam beam2{Ball{from2d, closestR}, Ball{tod, mergeR}};

    auto sd = m_sm.cfg.safety_distance_mm;
    auto hit1 = beam_mesh_hit(ex_tbb, m_sm.emesh, beam1, sd);
    auto hit2 = beam_mesh_hit(ex_tbb, m_sm.emesh, beam2, sd);

    bool ret = hit1.distance() > (tod - from1d).norm() &&
               hit2.distance() > (tod - from2d).norm();

    return ret;
}

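// Route a branch from 'from' down to the print bed; on success the whole
// subtree hanging off 'from' is committed to the builder.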
bool BranchingTreeBuilder::add_ground_bridge(const branchingtree::Node &from,
                                             const branchingtree::Node &to)
{
    bool ret = search_ground_route(ex_tbb, m_builder, m_sm,
                                   sla::Junction{from.pos.cast<double>(),
                                                 get_radius(from)},
                                   get_radius(to)).first;

    if (ret) {
        build_subtree(from.id);
    }

    return ret;
}

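// Anchor a branch to the model surface instead of the bed. Skipped entirely
// when ground_facing_only is set; otherwise the anchor is kept only if the
// connecting beam does not collide with the mesh.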
bool BranchingTreeBuilder::add_mesh_bridge(const branchingtree::Node &from,
                                           const branchingtree::Node &to)
{
    sla::Junction fromj = {from.pos.cast<double>(), get_radius(from)};

    auto anchor = m_sm.cfg.ground_facing_only ?
                      std::optional<Anchor>{} : // If no mesh connections are allowed
                      calculate_anchor_placement(ex_tbb, m_sm, fromj,
                                                 to.pos.cast<double>());

    if (anchor) {
        sla::Junction toj = {anchor->junction_point(), anchor->r_back_mm};

        auto hit = beam_mesh_hit(ex_tbb, m_sm.emesh,
                                 Beam{{fromj.pos, fromj.r}, {toj.pos, toj.r}}, 0.);

        if (hit.distance() > distance(fromj.pos, toj.pos)) {
            m_builder.add_diffbridge(fromj.pos, toj.pos, fromj.r, toj.r);
            m_builder.add_anchor(*anchor);

            build_subtree(from.id);
        } else {
            anchor.reset();
        }
    }

    return bool(anchor);
}

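// Entry point: deduplicate the support points, compute pinhead placements in
// parallel, then hand heads, mesh samples and bed samples to the branching
// tree search; pinheads that could not be routed are invalidated afterwards.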
void create_branching_tree(SupportTreeBuilder &builder, const SupportableMesh &sm)
{
    auto coordfn = [&sm](size_t id, size_t dim) { return sm.pts[id].pos(dim); };
    KDTreeIndirect<3, float, decltype (coordfn)> tree{coordfn, sm.pts.size()};

    auto nondup_idx = non_duplicate_suppt_indices(tree, sm.pts, 0.1);
    std::vector<std::optional<Head>> heads(nondup_idx.size());
    auto leafs = reserve_vector<branchingtree::Node>(nondup_idx.size());

    execution::for_each(
        ex_tbb, size_t(0), nondup_idx.size(),
        [&sm, &heads, &nondup_idx, &builder](size_t i) {
            if (!builder.ctl().stopcondition())
                heads[i] = calculate_pinhead_placement(ex_seq, sm, nondup_idx[i]);
        },
        execution::max_concurrency(ex_tbb)
    );

    if (builder.ctl().stopcondition())
        return;

    for (auto &h : heads)
        if (h && h->is_valid()) {
            leafs.emplace_back(h->junction_point().cast<float>(), h->r_back_mm);
            h->id = leafs.size() - 1;
            builder.add_head(h->id, *h);
        }

    auto &its = *sm.emesh.get_triangle_mesh();
    ExPolygons bedpolys = {branchingtree::make_bed_poly(its)};

    auto props = branchingtree::Properties{}
                     .bed_shape(bedpolys)
                     .ground_level(sla::ground_level(sm))
                     .max_slope(sm.cfg.bridge_slope)
                     .max_branch_length(sm.cfg.max_bridge_length_mm);

    auto meshpts = sm.cfg.ground_facing_only ?
                       std::vector<branchingtree::Node>{} :
                       branchingtree::sample_mesh(its,
                                                  props.sampling_radius());

    auto bedpts = branchingtree::sample_bed(props.bed_shape(),
                                            props.ground_level(),
                                            props.sampling_radius());

    branchingtree::PointCloud nodes{std::move(meshpts), std::move(bedpts),
                                    std::move(leafs), props};

    BranchingTreeBuilder vbuilder{builder, sm, nodes};
    branchingtree::build_tree(nodes, vbuilder);

    for (size_t id : vbuilder.unroutable_pinheads())
        builder.head(id).invalidate();

}

}} // namespace Slic3r::sla