From 57a899a412b0d45c159423925c1f63164dcd9308 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eduardo=20Jos=C3=A9=20Tagle?= <ejtagle@hotmail.com>
Date: Wed, 11 Apr 2018 20:13:42 -0300
Subject: [PATCH] [2.0.x] 6th-order jerk-controlled motion planning in
 real-time for AVR (#10373)

---
 Marlin/src/inc/SanityCheck.h  |   2 -
 Marlin/src/module/planner.cpp | 532 +++++++++++++++++++++-
 Marlin/src/module/planner.h   |   6 +-
 Marlin/src/module/stepper.cpp | 834 +++++++++++++++++++++++++++++-----
 Marlin/src/module/stepper.h   |  17 +-
 5 files changed, 1265 insertions(+), 126 deletions(-)

diff --git a/Marlin/src/inc/SanityCheck.h b/Marlin/src/inc/SanityCheck.h
index 8d25c9b53f..210246a9cc 100644
--- a/Marlin/src/inc/SanityCheck.h
+++ b/Marlin/src/inc/SanityCheck.h
@@ -99,8 +99,6 @@
   #error "Z_ENDSTOP_SERVO_NR is now Z_PROBE_SERVO_NR. Please update your configuration."
 #elif defined(DEFAULT_XYJERK)
   #error "DEFAULT_XYJERK is deprecated. Use DEFAULT_XJERK and DEFAULT_YJERK instead."
-#elif ENABLED(BEZIER_JERK_CONTROL) && !defined(CPU_32_BIT)
-  #error "BEZIER_JERK_CONTROL is computationally intensive and requires a 32-bit board."
 #elif defined(XY_TRAVEL_SPEED)
   #error "XY_TRAVEL_SPEED is deprecated. Use XY_PROBE_SPEED instead."
 #elif defined(PROBE_SERVO_DEACTIVATION_DELAY)
diff --git a/Marlin/src/module/planner.cpp b/Marlin/src/module/planner.cpp
index cd95bb04f9..bb0cb8eb2e 100644
--- a/Marlin/src/module/planner.cpp
+++ b/Marlin/src/module/planner.cpp
@@ -56,6 +56,10 @@
  *
  * IntersectionDistance[s1_, s2_, a_, d_] := (2 a d - s1^2 + s2^2)/(4 a)
  *
+ * --
+ *
+ * The fast inverse function needed for Bézier interpolation for AVR
+ * was designed, written and tested by Eduardo José Tagle on April/2018
  */
 
 #include "planner.h"
@@ -215,6 +219,523 @@ void Planner::init() {
   #endif
 }
 
+#if ENABLED(BEZIER_JERK_CONTROL)
+
+  #ifdef __AVR__
+    // This routine, for AVR, returns 0x1000000 / d, computing the inverse as
+    //  fast as possible. A fast-converging iterative Newton-Raphson method
+    //  reaches full precision in just 1 iteration, and takes 211 cycles in the
+    //  worst case (the mean case is less, down to about 30 cycles for small
+    //  divisors), instead of the 500 cycles a normal division would take.
+    //
+    // Inspired by the following page,
+    //  https://stackoverflow.com/questions/27801397/newton-raphson-division-with-big-integers
+    //
+    // Suppose we want to calculate
+    //  floor(2 ^ k / B)    where B is a positive integer
+    // Then
+    //  B must be <= 2^k, otherwise, the quotient is 0.
+    //
+    // The Newton-Raphson iteration for the reciprocal of B / 2 ^ k yields:
+    //  q[n + 1] = q[n] * (2 - q[n] * B / 2 ^ k)
+    //
+    // We can rearrange it as:
+    //  q[n + 1] = q[n] * (2 ^ (k + 1) - q[n] * B) >> k
+    //
+    //  Each iteration of this kind requires only integer multiplications
+    // and bit shifts.
+    //  Does it converge to floor(2 ^ k / B)?  Not necessarily, but, in
+    // the worst case, it eventually alternates between floor(2 ^ k / B)
+    // and ceiling(2 ^ k / B).
+    //  So we can use some not-so-clever test to see if we are in this
+    // case, and extract floor(2 ^ k / B).
+    //  Lastly, a simple but important optimization for this approach is to
+    // truncate multiplications (i.e. calculate only the higher bits of the
+    // product) in the early iterations of the Newton-Raphson method. The
+    // reason to do so is that the results of the early iterations are far
+    // from the quotient, so there is no harm in performing them inaccurately.
+    //  Finally, we should pick a good starting value for x. Knowing how many
+    // digits the divisor has, we can estimate it:
+    //
+    // 2^k / x = 2 ^ log2(2^k / x)
+    // 2^k / x = 2 ^(log2(2^k)-log2(x))
+    // 2^k / x = 2 ^(k*log2(2)-log2(x))
+    // 2^k / x = 2 ^ (k-log2(x))
+    // 2^k / x >= 2 ^ (k-floor(log2(x)))
+    // floor(log2(x)) simply is the index of the most significant bit set.
+    //
+    //  If we could improve this estimation even further, then the number of
+    // iterations can be dropped quite a bit, thus saving valuable execution time.
+    //  The paper "Software Integer Division" by Thomas L.Rodeheffer, Microsoft
+    // Research, Silicon Valley,August 26, 2008, that is available at
+    // https://www.microsoft.com/en-us/research/wp-content/uploads/2008/08/tr-2008-141.pdf
+    // suggests , for its integer division algorithm, that using a table to supply the
+    // first 8 bits of precision, and due to the quadratic convergence nature of the
+    // Newton-Raphon iteration, then just 2 iterations should be enough to get
+    // maximum precision of the division.
+    //  If we precompute values of inverses for small denominator values, then
+    // just one Newton-Raphson iteration is enough to reach full precision
+    //  We will use the top 9 bits of the denominator as index.
+    //
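+    //  As a quick worked example (illustrative only, using the estimation table below): for d = 1000,
+    // the MSB search shifts nr up to 0xFA0000 with idx = 14, so tidx = nr >> 15 = 500 and the table
+    // entry inv_tab[244] = 6 gives ie = 262. Scaling left by idx - 8 = 6 bits yields the seed
+    // x = 262 << 6 = 16768 (the exact value is 2^24 / 1000 = 16777.2). One Newton-Raphson step gives
+    // x = (16768 * (2^25 - 16768 * 1000)) >> 24 = 16777, and the remainder 2^24 - 16777 * 1000 = 216
+    // is less than 1000, so no correction is needed: 16777 = floor(2^24 / 1000).
+    //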
+    //  The AVR assembly function is implementing the following C code, included
+    // here as reference:
+    //
+    // uint32_t get_period_inverse(uint32_t d) {
+    //  static const uint8_t inv_tab[256] = {
+    //    255,253,252,250,248,246,244,242,240,238,236,234,233,231,229,227,
+    //    225,224,222,220,218,217,215,213,212,210,208,207,205,203,202,200,
+    //    199,197,195,194,192,191,189,188,186,185,183,182,180,179,178,176,
+    //    175,173,172,170,169,168,166,165,164,162,161,160,158,157,156,154,
+    //    153,152,151,149,148,147,146,144,143,142,141,139,138,137,136,135,
+    //    134,132,131,130,129,128,127,126,125,123,122,121,120,119,118,117,
+    //    116,115,114,113,112,111,110,109,108,107,106,105,104,103,102,101,
+    //    100,99,98,97,96,95,94,93,92,91,90,89,88,88,87,86,
+    //    85,84,83,82,81,80,80,79,78,77,76,75,74,74,73,72,
+    //    71,70,70,69,68,67,66,66,65,64,63,62,62,61,60,59,
+    //    59,58,57,56,56,55,54,53,53,52,51,50,50,49,48,48,
+    //    47,46,46,45,44,43,43,42,41,41,40,39,39,38,37,37,
+    //    36,35,35,34,33,33,32,32,31,30,30,29,28,28,27,27,
+    //    26,25,25,24,24,23,22,22,21,21,20,19,19,18,18,17,
+    //    17,16,15,15,14,14,13,13,12,12,11,10,10,9,9,8,
+    //    8,7,7,6,6,5,5,4,4,3,3,2,2,1,0,0
+    //  };
+    //
+    //  // For small denominators, it is cheaper to directly store the result,
+    //  //  because those denominators would require 2 Newton-Raphson iterations
+    //  //  to converge to the required result precision. For bigger ones, just
+    //  //  ONE Newton-Raphson iteration is enough to get maximum precision!
+    //  static const uint32_t small_inv_tab[111] PROGMEM = {
+    //    16777216,16777216,8388608,5592405,4194304,3355443,2796202,2396745,2097152,1864135,1677721,1525201,1398101,1290555,1198372,1118481,
+    //    1048576,986895,932067,883011,838860,798915,762600,729444,699050,671088,645277,621378,599186,578524,559240,541200,
+    //    524288,508400,493447,479349,466033,453438,441505,430185,419430,409200,399457,390167,381300,372827,364722,356962,
+    //    349525,342392,335544,328965,322638,316551,310689,305040,299593,294337,289262,284359,279620,275036,270600,266305,
+    //    262144,258111,254200,250406,246723,243148,239674,236298,233016,229824,226719,223696,220752,217885,215092,212369,
+    //    209715,207126,204600,202135,199728,197379,195083,192841,190650,188508,186413,184365,182361,180400,178481,176602,
+    //    174762,172960,171196,169466,167772,166111,164482,162885,161319,159783,158275,156796,155344,153919,152520
+    //  };
+    //
+    //  // For small divisors, it is best to directly retrieve the results
+    //  if (d <= 110)
+    //    return pgm_read_dword(&small_inv_tab[d]);
+    //
+    //  // Compute initial estimation of 0x1000000/x -
+    //  // Get most significant bit set on divider
+    //  uint8_t idx = 0;
+    //  uint32_t nr = d;
+    //  if (!(nr & 0xff0000)) {
+    //    nr <<= 8;
+    //    idx += 8;
+    //    if (!(nr & 0xff0000)) {
+    //      nr <<= 8;
+    //      idx += 8;
+    //    }
+    //  }
+    //  if (!(nr & 0xf00000)) {
+    //    nr <<= 4;
+    //    idx += 4;
+    //  }
+    //  if (!(nr & 0xc00000)) {
+    //    nr <<= 2;
+    //    idx += 2;
+    //  }
+    //  if (!(nr & 0x800000)) {
+    //    nr <<= 1;
+    //    idx += 1;
+    //  }
+    //
+    //  // Isolate top 9 bits of the denominator, to be used as index into the initial estimation table
+    //  uint32_t tidx = nr >> 15;         // top 9 bits. bit8 is always set
+    //  uint32_t ie = inv_tab[tidx & 0xFF] + 256; // Get the table value. bit9 is always set
+    //  uint32_t x = idx <= 8 ? (ie >> (8 - idx)) : (ie << (idx - 8)); // Position the estimation at the proper place
+    //
+    //  // Now, refine estimation by newton-raphson. 1 iteration is enough
+    //  x = uint32_t((x * uint64_t((1 << 25) - x * d)) >> 24);
+    //
+    //  // Estimate remainder
+    //  uint32_t r = (1 << 24) - x * d;
+    //
+    //  // Check if we must adjust result
+    //  if (r >= d) x++;
+    //
+    //  // x holds the proper estimation
+    //  return uint32_t(x);
+    // }
+    //
+    static uint32_t get_period_inverse(uint32_t d) {
+
+       static const uint8_t inv_tab[256] PROGMEM = {
+        255,253,252,250,248,246,244,242,240,238,236,234,233,231,229,227,
+        225,224,222,220,218,217,215,213,212,210,208,207,205,203,202,200,
+        199,197,195,194,192,191,189,188,186,185,183,182,180,179,178,176,
+        175,173,172,170,169,168,166,165,164,162,161,160,158,157,156,154,
+        153,152,151,149,148,147,146,144,143,142,141,139,138,137,136,135,
+        134,132,131,130,129,128,127,126,125,123,122,121,120,119,118,117,
+        116,115,114,113,112,111,110,109,108,107,106,105,104,103,102,101,
+        100,99,98,97,96,95,94,93,92,91,90,89,88,88,87,86,
+        85,84,83,82,81,80,80,79,78,77,76,75,74,74,73,72,
+        71,70,70,69,68,67,66,66,65,64,63,62,62,61,60,59,
+        59,58,57,56,56,55,54,53,53,52,51,50,50,49,48,48,
+        47,46,46,45,44,43,43,42,41,41,40,39,39,38,37,37,
+        36,35,35,34,33,33,32,32,31,30,30,29,28,28,27,27,
+        26,25,25,24,24,23,22,22,21,21,20,19,19,18,18,17,
+        17,16,15,15,14,14,13,13,12,12,11,10,10,9,9,8,
+        8,7,7,6,6,5,5,4,4,3,3,2,2,1,0,0
+      };
+
+      // For small denominators, it is cheaper to directly store the result.
+      //  For bigger ones, just ONE Newton-Raphson iteration is enough to get
+      //  the maximum precision we need
+      static const uint32_t small_inv_tab[111] PROGMEM = {
+        16777216,16777216,8388608,5592405,4194304,3355443,2796202,2396745,2097152,1864135,1677721,1525201,1398101,1290555,1198372,1118481,
+        1048576,986895,932067,883011,838860,798915,762600,729444,699050,671088,645277,621378,599186,578524,559240,541200,
+        524288,508400,493447,479349,466033,453438,441505,430185,419430,409200,399457,390167,381300,372827,364722,356962,
+        349525,342392,335544,328965,322638,316551,310689,305040,299593,294337,289262,284359,279620,275036,270600,266305,
+        262144,258111,254200,250406,246723,243148,239674,236298,233016,229824,226719,223696,220752,217885,215092,212369,
+        209715,207126,204600,202135,199728,197379,195083,192841,190650,188508,186413,184365,182361,180400,178481,176602,
+        174762,172960,171196,169466,167772,166111,164482,162885,161319,159783,158275,156796,155344,153919,152520
+      };
+
+      // For small divisors, it is best to directly retrieve the results
+      if (d <= 110)
+        return pgm_read_dword(&small_inv_tab[d]);
+
+      register uint8_t r8 = d & 0xFF;
+      register uint8_t r9 = (d >> 8) & 0xFF;
+      register uint8_t r10 = (d >> 16) & 0xFF;
+      register uint8_t r2,r3,r4,r5,r6,r7,r11,r12,r13,r14,r15,r16,r17,r18;
+      register const uint8_t* ptab = inv_tab;
+
+      __asm__ __volatile__(
+        /*  %8:%7:%6 = interval*/
+        /*  r31:r30: MUST be those registers, and they must point to the inv_tab */
+
+        " clr %13" "\n\t"                 /* %13 = 0 */
+
+        /*  Now we must compute */
+        /*   result = 0xFFFFFF / d */
+        /*  %8:%7:%6 = interval*/
+        /*  %16:%15:%14 = nr */
+        /*  %13 = 0*/
+
+        /*  A plain division of 24x24 bits should take 388 cycles to complete. We will */
+        /*  use Newton-Raphson for the calculation, and will strive to get way fewer cycles*/
+        /*  for the same result - Using C division, it takes 500 cycles to complete.*/
+
+        " clr %3" "\n\t"                  /* idx = 0 */
+        " mov %14,%6" "\n\t"
+        " mov %15,%7" "\n\t"
+        " mov %16,%8" "\n\t"              /* nr = interval */
+        " tst %16" "\n\t"                 /* nr & 0xFF0000 == 0 ? */
+        " brne 2f" "\n\t"                 /* No, skip this */
+        " mov %16,%15" "\n\t"
+        " mov %15,%14" "\n\t"             /* nr <<= 8, %14 not needed */
+        " subi %3,-8" "\n\t"              /* idx += 8 */
+        " tst %16" "\n\t"                 /* nr & 0xFF0000 == 0 ? */
+        " brne 2f" "\n\t"                 /* No, skip this */
+        " mov %16,%15" "\n\t"             /* nr <<= 8, %14 not needed */
+        " clr %15" "\n\t"                 /* We clear %14 */
+        " subi %3,-8" "\n\t"              /* idx += 8 */
+
+        /*  here %16 != 0 and %16:%15 contains at least 9 MSBits, or both %16:%15 are 0 */
+        "2:" "\n\t"
+        " cpi %16,0x10" "\n\t"            /* (nr & 0xf00000) == 0 ? */
+        " brcc 3f" "\n\t"                 /* No, skip this */
+        " swap %15" "\n\t"                /* Swap nibbles */
+        " swap %16" "\n\t"                /* Swap nibbles. Low nibble is 0 */
+        " mov %14, %15" "\n\t"
+        " andi %14,0x0f" "\n\t"           /* Isolate low nibble */
+        " andi %15,0xf0" "\n\t"           /* Keep proper nibble in %15 */
+        " or %16, %14" "\n\t"             /* %16:%15 <<= 4 */
+        " subi %3,-4" "\n\t"              /* idx += 4 */
+
+        "3:" "\n\t"
+        " cpi %16,0x40" "\n\t"            /* (nr & 0xc00000) == 0 ? */
+        " brcc 4f" "\n\t"                 /* No, skip this*/
+        " add %15,%15" "\n\t"
+        " adc %16,%16" "\n\t"
+        " add %15,%15" "\n\t"
+        " adc %16,%16" "\n\t"             /* %16:%15 <<= 2 */
+        " subi %3,-2" "\n\t"              /* idx += 2 */
+
+        "4:" "\n\t"
+        " cpi %16,0x80" "\n\t"            /* (nr & 0x800000) == 0 ? */
+        " brcc 5f" "\n\t"                 /* No, skip this */
+        " add %15,%15" "\n\t"
+        " adc %16,%16" "\n\t"             /* %16:%15 <<= 1 */
+        " inc %3" "\n\t"                  /* idx += 1 */
+
+        /*  Now %16:%15 contains its MSBit set to 1, or %16:%15 is == 0. We are now absolutely sure*/
+        /*  we have at least 9 MSBits available to enter the initial estimation table*/
+        "5:" "\n\t"
+        " add %15,%15" "\n\t"
+        " adc %16,%16" "\n\t"             /* %16:%15 = tidx = (nr <<= 1), we lose the top MSBit (always set to 1, %16 is the index into the inverse table)*/
+        " add r30,%16" "\n\t"             /* Only use top 8 bits */
+        " adc r31,%13" "\n\t"             /* r31:r30 = inv_tab + (tidx) */
+        " lpm %14, Z" "\n\t"              /* %14 = inv_tab[tidx] */
+        " ldi %15, 1" "\n\t"              /* %15 = 1  %15:%14 = inv_tab[tidx] + 256 */
+
+        /*  We must scale the approximation to the proper place*/
+        " clr %16" "\n\t"                 /* %16 will always be 0 here */
+        " subi %3,8" "\n\t"               /* idx == 8 ? */
+        " breq 6f" "\n\t"                 /* yes, no need to scale*/
+        " brcs 7f" "\n\t"                 /* If C=1, means idx < 8, result was negative!*/
+
+        /*  idx > 8, now %3 = idx - 8. We must perform a left shift. idx range:[1-8]*/
+        " sbrs %3,0" "\n\t"               /* shift by 1bit position?*/
+        " rjmp 8f" "\n\t"                 /* No*/
+        " add %14,%14" "\n\t"
+        " adc %15,%15" "\n\t"             /* %15:16 <<= 1*/
+        "8:" "\n\t"
+        " sbrs %3,1" "\n\t"               /* shift by 2bit position?*/
+        " rjmp 9f" "\n\t"                 /* No*/
+        " add %14,%14" "\n\t"
+        " adc %15,%15" "\n\t"
+        " add %14,%14" "\n\t"
+        " adc %15,%15" "\n\t"             /* %15:16 <<= 1*/
+        "9:" "\n\t"
+        " sbrs %3,2" "\n\t"               /* shift by 4bits position?*/
+        " rjmp 16f" "\n\t"                /* No*/
+        " swap %15" "\n\t"                /* Swap nibbles. lo nibble of %15 will always be 0*/
+        " swap %14" "\n\t"                /* Swap nibbles*/
+        " mov %12,%14" "\n\t"
+        " andi %12,0x0f" "\n\t"           /* isolate low nibble*/
+        " andi %14,0xf0" "\n\t"           /* and clear it*/
+        " or %15,%12" "\n\t"              /* %15:%16 <<= 4*/
+        "16:" "\n\t"
+        " sbrs %3,3" "\n\t"               /* shift by 8bits position?*/
+        " rjmp 6f" "\n\t"                 /* No, we are done */
+        " mov %16,%15" "\n\t"
+        " mov %15,%14" "\n\t"
+        " clr %14" "\n\t"
+        " jmp 6f" "\n\t"
+
+        /*  idx < 8, now %3 = idx - 8. Get the count of bits */
+        "7:" "\n\t"
+        " neg %3" "\n\t"                  /* %3 = -idx = count of bits to move right. idx range:[1...8]*/
+        " sbrs %3,0" "\n\t"               /* shift by 1 bit position ?*/
+        " rjmp 10f" "\n\t"                /* No, skip it*/
+        " asr %15" "\n\t"                 /* (bit7 is always 0 here)*/
+        " ror %14" "\n\t"
+        "10:" "\n\t"
+        " sbrs %3,1" "\n\t"               /* shift by 2 bit position ?*/
+        " rjmp 11f" "\n\t"                /* No, skip it*/
+        " asr %15" "\n\t"                 /* (bit7 is always 0 here)*/
+        " ror %14" "\n\t"
+        " asr %15" "\n\t"                 /* (bit7 is always 0 here)*/
+        " ror %14" "\n\t"
+        "11:" "\n\t"
+        " sbrs %3,2" "\n\t"               /* shift by 4 bit position ?*/
+        " rjmp 12f" "\n\t"                /* No, skip it*/
+        " swap %15" "\n\t"                /* Swap nibbles*/
+        " andi %14, 0xf0" "\n\t"          /* Lose the lowest nibble*/
+        " swap %14" "\n\t"                /* Swap nibbles. Upper nibble is 0*/
+        " or %14,%15" "\n\t"              /* Pass nibble from upper byte*/
+        " andi %15, 0x0f" "\n\t"          /* And get rid of that nibble*/
+        "12:" "\n\t"
+        " sbrs %3,3" "\n\t"               /* shift by 8 bit position ?*/
+        " rjmp 6f" "\n\t"                 /* No, skip it*/
+        " mov %14,%15" "\n\t"
+        " clr %15" "\n\t"
+        "6:" "\n\t"                       /* %16:%15:%14 = initial estimation of 0x1000000 / d*/
+
+        /*  Now, we must refine the estimation present on %16:%15:%14 using 1 iteration*/
+        /*   of Newton-Raphson. As it has a quadratic convergence, 1 iteration is enough*/
+        /*   to get more than 18bits of precision (the initial table lookup gives 9 bits of*/
+        /*   precision to start from). 18bits of precision is all that is needed here for the result */
+
+        /*  %8:%7:%6 = d = interval*/
+        /*  %16:%15:%14 = x = initial estimation of 0x1000000 / d*/
+        /*  %13 = 0*/
+        /*  %3:%2:%1:%0 = working accumulator*/
+
+        /*  Compute 1<<25 - x*d. Result should never exceed 25 bits and should always be positive*/
+        " clr %0" "\n\t"
+        " clr %1" "\n\t"
+        " clr %2" "\n\t"
+        " ldi %3,2" "\n\t"                /* %3:%2:%1:%0 = 0x2000000*/
+        " mul %6,%14" "\n\t"              /* r1:r0 = LO(d) * LO(x)*/
+        " sub %0,r0" "\n\t"
+        " sbc %1,r1" "\n\t"
+        " sbc %2,%13" "\n\t"
+        " sbc %3,%13" "\n\t"              /* %3:%2:%1:%0 -= LO(d) * LO(x)*/
+        " mul %7,%14" "\n\t"              /* r1:r0 = MI(d) * LO(x)*/
+        " sub %1,r0" "\n\t"
+        " sbc %2,r1"  "\n\t"
+        " sbc %3,%13" "\n\t"              /* %3:%2:%1:%0 -= MI(d) * LO(x) << 8*/
+        " mul %8,%14" "\n\t"              /* r1:r0 = HI(d) * LO(x)*/
+        " sub %2,r0" "\n\t"
+        " sbc %3,r1" "\n\t"               /* %3:%2:%1:%0 -= MIL(d) * LO(x) << 16*/
+        " mul %6,%15" "\n\t"              /* r1:r0 = LO(d) * MI(x)*/
+        " sub %1,r0" "\n\t"
+        " sbc %2,r1" "\n\t"
+        " sbc %3,%13" "\n\t"              /* %3:%2:%1:%0 -= LO(d) * MI(x) << 8*/
+        " mul %7,%15" "\n\t"              /* r1:r0 = MI(d) * MI(x)*/
+        " sub %2,r0" "\n\t"
+        " sbc %3,r1" "\n\t"               /* %3:%2:%1:%0 -= MI(d) * MI(x) << 16*/
+        " mul %8,%15" "\n\t"              /* r1:r0 = HI(d) * MI(x)*/
+        " sub %3,r0" "\n\t"               /* %3:%2:%1:%0 -= MIL(d) * MI(x) << 24*/
+        " mul %6,%16" "\n\t"              /* r1:r0 = LO(d) * HI(x)*/
+        " sub %2,r0" "\n\t"
+        " sbc %3,r1" "\n\t"               /* %3:%2:%1:%0 -= LO(d) * HI(x) << 16*/
+        " mul %7,%16" "\n\t"              /* r1:r0 = MI(d) * HI(x)*/
+        " sub %3,r0" "\n\t"               /* %3:%2:%1:%0 -= MI(d) * HI(x) << 24*/
+        /*  %3:%2:%1:%0 = (1<<25) - x*d     [169]*/
+
+        /*  We need to multiply that result by x, and we are only interested in the top 24bits of that multiply*/
+
+        /*  %16:%15:%14 = x = initial estimation of 0x1000000 / d*/
+        /*  %3:%2:%1:%0 = (1<<25) - x*d = acc*/
+        /*  %13 = 0 */
+
+        /*  result = %11:%10:%9:%5:%4*/
+        " mul %14,%0" "\n\t"              /* r1:r0 = LO(x) * LO(acc)*/
+        " mov %4,r1" "\n\t"
+        " clr %5" "\n\t"
+        " clr %9" "\n\t"
+        " clr %10" "\n\t"
+        " clr %11" "\n\t"                 /* %11:%10:%9:%5:%4 = LO(x) * LO(acc) >> 8*/
+        " mul %15,%0" "\n\t"              /* r1:r0 = MI(x) * LO(acc)*/
+        " add %4,r0" "\n\t"
+        " adc %5,r1" "\n\t"
+        " adc %9,%13" "\n\t"
+        " adc %10,%13" "\n\t"
+        " adc %11,%13" "\n\t"             /* %11:%10:%9:%5:%4 += MI(x) * LO(acc) */
+        " mul %16,%0" "\n\t"              /* r1:r0 = HI(x) * LO(acc)*/
+        " add %5,r0" "\n\t"
+        " adc %9,r1" "\n\t"
+        " adc %10,%13" "\n\t"
+        " adc %11,%13" "\n\t"             /* %11:%10:%9:%5:%4 += MI(x) * LO(acc) << 8*/
+
+        " mul %14,%1" "\n\t"              /* r1:r0 = LO(x) * MIL(acc)*/
+        " add %4,r0" "\n\t"
+        " adc %5,r1" "\n\t"
+        " adc %9,%13" "\n\t"
+        " adc %10,%13" "\n\t"
+        " adc %11,%13" "\n\t"             /* %11:%10:%9:%5:%4 = LO(x) * MIL(acc)*/
+        " mul %15,%1" "\n\t"              /* r1:r0 = MI(x) * MIL(acc)*/
+        " add %5,r0" "\n\t"
+        " adc %9,r1" "\n\t"
+        " adc %10,%13" "\n\t"
+        " adc %11,%13" "\n\t"             /* %11:%10:%9:%5:%4 += MI(x) * MIL(acc) << 8*/
+        " mul %16,%1" "\n\t"              /* r1:r0 = HI(x) * MIL(acc)*/
+        " add %9,r0" "\n\t"
+        " adc %10,r1" "\n\t"
+        " adc %11,%13" "\n\t"             /* %11:%10:%9:%5:%4 += MI(x) * MIL(acc) << 16*/
+
+        " mul %14,%2" "\n\t"              /* r1:r0 = LO(x) * MIH(acc)*/
+        " add %5,r0" "\n\t"
+        " adc %9,r1" "\n\t"
+        " adc %10,%13" "\n\t"
+        " adc %11,%13" "\n\t"             /* %11:%10:%9:%5:%4 = LO(x) * MIH(acc) << 8*/
+        " mul %15,%2" "\n\t"              /* r1:r0 = MI(x) * MIH(acc)*/
+        " add %9,r0" "\n\t"
+        " adc %10,r1" "\n\t"
+        " adc %11,%13" "\n\t"             /* %11:%10:%9:%5:%4 += MI(x) * MIH(acc) << 16*/
+        " mul %16,%2" "\n\t"              /* r1:r0 = HI(x) * MIH(acc)*/
+        " add %10,r0" "\n\t"
+        " adc %11,r1" "\n\t"              /* %11:%10:%9:%5:%4 += MI(x) * MIH(acc) << 24*/
+
+        " mul %14,%3" "\n\t"              /* r1:r0 = LO(x) * HI(acc)*/
+        " add %9,r0" "\n\t"
+        " adc %10,r1" "\n\t"
+        " adc %11,%13" "\n\t"             /* %11:%10:%9:%5:%4 = LO(x) * HI(acc) << 16*/
+        " mul %15,%3" "\n\t"              /* r1:r0 = MI(x) * HI(acc)*/
+        " add %10,r0" "\n\t"
+        " adc %11,r1" "\n\t"              /* %11:%10:%9:%5:%4 += MI(x) * HI(acc) << 24*/
+        " mul %16,%3" "\n\t"              /* r1:r0 = HI(x) * HI(acc)*/
+        " add %11,r0" "\n\t"              /* %11:%10:%9:%5:%4 += MI(x) * HI(acc) << 32*/
+
+        /*  At this point, %11:%10:%9 contains the new estimation of x. */
+
+        /*  Finally, we must correct the result. Estimate remainder as*/
+        /*  (1<<24) - x*d*/
+        /*  %11:%10:%9 = x*/
+        /*  %8:%7:%6 = d = interval" "\n\t" /*  */
+        " ldi %3,1" "\n\t"
+        " clr %2" "\n\t"
+        " clr %1" "\n\t"
+        " clr %0" "\n\t"                  /* %3:%2:%1:%0 = 0x1000000*/
+        " mul %6,%9" "\n\t"              /* r1:r0 = LO(d) * LO(x)*/
+        " sub %0,r0" "\n\t"
+        " sbc %1,r1" "\n\t"
+        " sbc %2,%13" "\n\t"
+        " sbc %3,%13" "\n\t"              /* %3:%2:%1:%0 -= LO(d) * LO(x)*/
+        " mul %7,%9" "\n\t"              /* r1:r0 = MI(d) * LO(x)*/
+        " sub %1,r0" "\n\t"
+        " sbc %2,r1" "\n\t"
+        " sbc %3,%13" "\n\t"              /* %3:%2:%1:%0 -= MI(d) * LO(x) << 8*/
+        " mul %8,%9" "\n\t"              /* r1:r0 = HI(d) * LO(x)*/
+        " sub %2,r0" "\n\t"
+        " sbc %3,r1" "\n\t"               /* %3:%2:%1:%0 -= MIL(d) * LO(x) << 16*/
+        " mul %6,%10" "\n\t"              /* r1:r0 = LO(d) * MI(x)*/
+        " sub %1,r0" "\n\t"
+        " sbc %2,r1" "\n\t"
+        " sbc %3,%13" "\n\t"              /* %3:%2:%1:%0 -= LO(d) * MI(x) << 8*/
+        " mul %7,%10" "\n\t"              /* r1:r0 = MI(d) * MI(x)*/
+        " sub %2,r0" "\n\t"
+        " sbc %3,r1" "\n\t"               /* %3:%2:%1:%0 -= MI(d) * MI(x) << 16*/
+        " mul %8,%10" "\n\t"              /* r1:r0 = HI(d) * MI(x)*/
+        " sub %3,r0" "\n\t"               /* %3:%2:%1:%0 -= MIL(d) * MI(x) << 24*/
+        " mul %6,%11" "\n\t"              /* r1:r0 = LO(d) * HI(x)*/
+        " sub %2,r0" "\n\t"
+        " sbc %3,r1" "\n\t"               /* %3:%2:%1:%0 -= LO(d) * HI(x) << 16*/
+        " mul %7,%11" "\n\t"              /* r1:r0 = MI(d) * HI(x)*/
+        " sub %3,r0" "\n\t"               /* %3:%2:%1:%0 -= MI(d) * HI(x) << 24*/
+        /*  %3:%2:%1:%0 = r = (1<<24) - x*d*/
+        /*  %8:%7:%6 = d = interval */
+
+        /*  Perform the final correction*/
+        " sub %0,%6" "\n\t"
+        " sbc %1,%7" "\n\t"
+        " sbc %2,%8" "\n\t"              /* r -= d*/
+        " brcs 14f" "\n\t"                /* if ( r >= d) */
+
+        /*  %11:%10:%9 = x */
+        " ldi %3,1" "\n\t"
+        " add %9,%3" "\n\t"
+        " adc %10,%13" "\n\t"
+        " adc %11,%13" "\n\t"             /* x++*/
+        "14:" "\n\t"
+
+        /*  Estimation is done. %11:%10:%9 = x */
+        " clr __zero_reg__" "\n\t"        /* Make C runtime happy */
+        /*  [211 cycles total]*/
+        : "=r" (r2),
+          "=r" (r3),
+          "=r" (r4),
+          "=d" (r5),
+          "=r" (r6),
+          "=r" (r7),
+          "+r" (r8),
+          "+r" (r9),
+          "+r" (r10),
+          "=d" (r11),
+          "=r" (r12),
+          "=r" (r13),
+          "=d" (r14),
+          "=d" (r15),
+          "=d" (r16),
+          "=d" (r17),
+          "=d" (r18),
+          "+z" (ptab)
+        :
+        : "r0", "r1", "cc"
+      );
+
+      // Return the result
+      return r11 | (uint16_t(r12) << 8) | (uint32_t(r13) << 16);
+    }
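+
+    // Illustrative only (not part of this change): the routine above can be cross-checked against
+    // plain division, e.g. in a test harness built around the reference C version, with a sweep like:
+    //
+    //   for (uint32_t d = 111; d < 0x1000000UL; d += 997) {
+    //     const uint32_t ref = 0x1000000UL / d,
+    //                    inv = get_period_inverse(d);
+    //     if (inv != ref) { /* flag d, inv and ref for inspection */ }
+    //   }
+    //
+    // Divisors up to 110 come straight from small_inv_tab, so the sweep starts just above them.
+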
+  #else
+    // All the other 32-bit CPUs can easily perform the inverse using hardware division,
+    // so we don't need to reduce precision or to use assembly language at all.
+
+    // This routine, for all the other archs, returns 0x100000000 / d ~= 0xFFFFFFFF / d
+    static FORCE_INLINE uint32_t get_period_inverse(uint32_t d) {
+      return 0xFFFFFFFF / d;
+    }
+  #endif
+#endif
+
 #define MINIMAL_STEP_RATE 120
 
 /**
@@ -266,8 +787,13 @@ void Planner::calculate_trapezoid_for_block(block_t* const block, const float &e
 
   #if ENABLED(BEZIER_JERK_CONTROL)
     // Jerk controlled speed requires to express speed versus time, NOT steps
-    int32_t acceleration_time = ((float)(cruise_rate - initial_rate) / accel) * HAL_STEPPER_TIMER_RATE,
-            deceleration_time = ((float)(cruise_rate - final_rate) / accel) * HAL_STEPPER_TIMER_RATE;
+    uint32_t acceleration_time = ((float)(cruise_rate - initial_rate) / accel) * HAL_STEPPER_TIMER_RATE,
+             deceleration_time = ((float)(cruise_rate - final_rate) / accel) * HAL_STEPPER_TIMER_RATE;
+
+    // And to offload calculations from the ISR, we also calculate the inverse of those times here
+    uint32_t acceleration_time_inverse = get_period_inverse(acceleration_time);
+    uint32_t deceleration_time_inverse = get_period_inverse(deceleration_time);
+
   #endif
 
   CRITICAL_SECTION_START;  // Fill variables used by the stepper in a critical section
@@ -278,6 +804,8 @@ void Planner::calculate_trapezoid_for_block(block_t* const block, const float &e
     #if ENABLED(BEZIER_JERK_CONTROL)
       block->acceleration_time = acceleration_time;
       block->deceleration_time = deceleration_time;
+      block->acceleration_time_inverse = acceleration_time_inverse;
+      block->deceleration_time_inverse = deceleration_time_inverse;
       block->cruise_rate = cruise_rate;
     #endif
     block->final_rate = final_rate;
diff --git a/Marlin/src/module/planner.h b/Marlin/src/module/planner.h
index 0c752c1f87..17c133a1d0 100644
--- a/Marlin/src/module/planner.h
+++ b/Marlin/src/module/planner.h
@@ -96,8 +96,10 @@ typedef struct {
 
   #if ENABLED(BEZIER_JERK_CONTROL)
     uint32_t cruise_rate;                   // The actual cruise rate to use, between end of the acceleration phase and start of deceleration phase
-    int32_t acceleration_time,              // Acceleration time and deceleration time in STEP timer counts
-            deceleration_time;
+    uint32_t acceleration_time,             // Acceleration time and deceleration time in STEP timer counts
+             deceleration_time;
+    uint32_t acceleration_time_inverse,     // Inverse of acceleration and deceleration periods, expressed as integer. Scale depends on CPU being used
+             deceleration_time_inverse;
   #else
     int32_t acceleration_rate;              // The acceleration rate used for acceleration calculation
   #endif
diff --git a/Marlin/src/module/stepper.cpp b/Marlin/src/module/stepper.cpp
index 2b0974efa5..7be9c9ead2 100644
--- a/Marlin/src/module/stepper.cpp
+++ b/Marlin/src/module/stepper.cpp
@@ -117,11 +117,14 @@ long Stepper::counter_X = 0,
 volatile uint32_t Stepper::step_events_completed = 0; // The number of step events executed in the current block
 
 #if ENABLED(BEZIER_JERK_CONTROL)
-  int32_t Stepper::bezier_A,        // A coefficient in Bézier speed curve
-          Stepper::bezier_B,        // B coefficient in Bézier speed curve
-          Stepper::bezier_C,        // C coefficient in Bézier speed curve
-          Stepper::bezier_F;        // F coefficient in Bézier speed curve
-  uint32_t Stepper::bezier_AV;      // AV coefficient in Bézier speed curve
+  int32_t __attribute__((used)) Stepper::bezier_A __asm__("bezier_A");    // A coefficient in Bézier speed curve with alias for assembler
+  int32_t __attribute__((used)) Stepper::bezier_B __asm__("bezier_B");    // B coefficient in Bézier speed curve with alias for assembler
+  int32_t __attribute__((used)) Stepper::bezier_C __asm__("bezier_C");    // C coefficient in Bézier speed curve with alias for assembler
+  uint32_t __attribute__((used)) Stepper::bezier_F __asm__("bezier_F");   // F coefficient in Bézier speed curve with alias for assembler
+  uint32_t __attribute__((used)) Stepper::bezier_AV __asm__("bezier_AV"); // AV coefficient in Bézier speed curve with alias for assembler
+  #ifdef __AVR__
+    bool __attribute__((used)) Stepper::A_negative __asm__("A_negative");   // If A coefficient was negative
+  #endif
   bool Stepper::bezier_2nd_half;    // =false If Bézier curve has been initialized or not
 #endif
 
@@ -391,130 +394,735 @@ void Stepper::set_directions() {
    *
    *  Note the abbreviations we use in the following formulae are between []s
    *
-   *  At the start of each trapezoid, we calculate the coefficients A,B,C,F and Advance [AV], as follows:
+   *  For any 32-bit CPU:
    *
-   *   A =  6*128*(VF - VI) =  768*(VF - VI)
-   *   B = 15*128*(VI - VF) = 1920*(VI - VF)
-   *   C = 10*128*(VF - VI) = 1280*(VF - VI)
-   *   F =    128*VI        =  128*VI
-   *  AV = (1<<32)/TS      ~= 0xFFFFFFFF / TS (To use ARM UDIV, that is 32 bits)
+   *    At the start of each trapezoid, we calculate the coefficients A,B,C,F and Advance [AV], as follows:
    *
-   *  And for each point, we will evaluate the curve with the following sequence:
+   *      A =  6*128*(VF - VI) =  768*(VF - VI)
+   *      B = 15*128*(VI - VF) = 1920*(VI - VF)
+   *      C = 10*128*(VF - VI) = 1280*(VF - VI)
+   *      F =    128*VI        =  128*VI
+   *     AV = (1<<32)/TS      ~= 0xFFFFFFFF / TS (To use ARM UDIV, that is 32 bits) (this is computed at the planner, to offload expensive calculations from the ISR)
    *
-   *    uint32_t t = bezier_AV * curr_step;               // t: Range 0 - 1^32 = 32 bits
-   *    uint64_t f = t;
-   *    f *= t;                                           // Range 32*2 = 64 bits (unsigned)
-   *    f >>= 32;                                         // Range 32 bits  (unsigned)
-   *    f *= t;                                           // Range 32*2 = 64 bits  (unsigned)
-   *    f >>= 32;                                         // Range 32 bits : f = t^3  (unsigned)
-   *    int64_t acc = (int64_t) bezier_F << 31;           // Range 63 bits (signed)
-   *    acc += ((uint32_t) f >> 1) * (int64_t) bezier_C;  // Range 29bits + 31 = 60bits (plus sign)
-   *    f *= t;                                           // Range 32*2 = 64 bits
-   *    f >>= 32;                                         // Range 32 bits : f = t^3  (unsigned)
-   *    acc += ((uint32_t) f >> 1) * (int64_t) bezier_B;  // Range 29bits + 31 = 60bits (plus sign)
-   *    f *= t;                                           // Range 32*2 = 64 bits
-   *    f >>= 32;                                         // Range 32 bits : f = t^3  (unsigned)
-   *    acc += ((uint32_t) f >> 1) * (int64_t) bezier_A;  // Range 28bits + 31 = 59bits (plus sign)
-   *    acc >>= (31 + 7);                                 // Range 24bits (plus sign)
+   *   And for each point, we will evaluate the curve with the following sequence:
    *
-   * This can be translated to the following ARM assembly sequence:
+   *      void lsrs(uint32_t& d, uint32_t s, int cnt) {
+   *        d = s >> cnt;
+   *      }
+   *      void lsls(uint32_t& d, uint32_t s, int cnt) {
+   *        d = s << cnt;
+   *      }
+   *      void lsrs(int32_t& d, uint32_t s, int cnt) {
+   *        d = uint32_t(s) >> cnt;
+   *      }
+   *      void lsls(int32_t& d, uint32_t s, int cnt) {
+   *        d = uint32_t(s) << cnt;
+   *      }
+   *      void umull(uint32_t& rlo, uint32_t& rhi, uint32_t op1, uint32_t op2) {
+   *        uint64_t res = uint64_t(op1) * op2;
+   *        rlo = uint32_t(res & 0xFFFFFFFF);
+   *        rhi = uint32_t((res >> 32) & 0xFFFFFFFF);
+   *      }
+   *      void smlal(int32_t& rlo, int32_t& rhi, int32_t op1, int32_t op2) {
+   *        int64_t mul = int64_t(op1) * op2;
+   *        int64_t s = int64_t(uint32_t(rlo) | ((uint64_t(uint32_t(rhi)) << 32U)));
+   *        mul += s;
+   *        rlo = int32_t(mul & 0xFFFFFFFF);
+   *        rhi = int32_t((mul >> 32) & 0xFFFFFFFF);
+   *      }
+   *      int32_t _eval_bezier_curve_arm(uint32_t curr_step) {
+   *        register uint32_t flo = 0;
+   *        register uint32_t fhi = bezier_AV * curr_step;
+   *        register uint32_t t = fhi;
+   *        register int32_t alo = bezier_F;
+   *        register int32_t ahi = 0;
+   *        register int32_t A = bezier_A;
+   *        register int32_t B = bezier_B;
+   *        register int32_t C = bezier_C;
    *
-   * At start:
-   *  fhi = AV, flo = CS, alo = F
+   *        lsrs(ahi, alo, 1);          // a  = F << 31
+   *        lsls(alo, alo, 31);         //
+   *        umull(flo, fhi, fhi, t);    // f *= t
+   *        umull(flo, fhi, fhi, t);    // f>>=32; f*=t
+   *        lsrs(flo, fhi, 1);          //
+   *        smlal(alo, ahi, flo, C);    // a+=(f>>33)*C
+   *        umull(flo, fhi, fhi, t);    // f>>=32; f*=t
+   *        lsrs(flo, fhi, 1);          //
+   *        smlal(alo, ahi, flo, B);    // a+=(f>>33)*B
+   *        umull(flo, fhi, fhi, t);    // f>>=32; f*=t
+   *        lsrs(flo, fhi, 1);          // f>>=33;
+   *        smlal(alo, ahi, flo, A);    // a+=(f>>33)*A;
+   *        lsrs(alo, ahi, 6);          // a>>=38
+   *
+   *        return alo;
+   *      }
+   *
+   *    This will be rewritten in ARM assembly to get peak performance and will take 43 cycles to execute
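+   *
+   *    As a plain-C cross-check (illustrative only, never used at runtime), the velocity curve the
+   *    fixed-point code approximates is the 5th-order "smoothstep" blend between VI and VF:
+   *
+   *      float bezier_velocity_ref(float VI, float VF, float t) {  // 0 <= t <= 1
+   *        const float t3 = t * t * t;
+   *        return VI + (VF - VI) * (6 * t3 * t * t - 15 * t3 * t + 10 * t3);
+   *      }
+   *
+   *    The coefficients above are exactly this polynomial scaled by 128:
+   *    A = 6*128*(VF-VI), B = -15*128*(VF-VI), C = 10*128*(VF-VI), F = 128*VI.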
+   *
+   *  For AVR, we scale the precision of the coefficients to make it possible to evaluate the Bézier
+   *    curve in real time, reducing precision as much as possible. After some experimentation we found that:
+   *
+   *    Assuming 24 bits for t and AV is enough:
+   *       A =  6*(VF - VI)
+   *       B = 15*(VI - VF)
+   *       C = 10*(VF - VI)
+   *       F =     VI
+   *      AV = (1<<24)/TS   (this is computed at the planner, to offload expensive calculations from the ISR)
+   *
+   *     Instead of storing the sign of each coefficient, we will store its absolute value,
+   *    and flag the sign of the A coefficient only, so we don't need to store a sign bit per coefficient.
+   *     It always holds that sign(A) = - sign(B) = sign(C)
+   *
+   *     So, the resulting range of the coefficients are:
+   *
+   *       t: unsigned (0 <= t < 1) | range 0 to 0xFFFFFF unsigned
+   *       A:   signed Q24 , range = 250000 * 6 = 1500000 = 0x16E360 | 21 bits
+   *       B:   signed Q24 , range = 250000 *15 = 3750000 = 0x393870 | 22 bits
+   *       C:   signed Q24 , range = 250000 *10 = 2500000 = 0x2625A0 | 22 bits
+   *       F:   signed Q24 , range = 250000     =  250000 = 0x03D090 | 18 bits
+   *
+   *    And for each curve, we estimate its coefficients with:
+   *
+   *      void _calc_bezier_curve_coeffs(int32_t v0, int32_t v1, uint32_t av) {
+   *       // Calculate the Bézier coefficients
+   *       if (v1 < v0) {
+   *         A_negative = true;
+   *         bezier_A = 6 * (v0 - v1);
+   *         bezier_B = 15 * (v0 - v1);
+   *         bezier_C = 10 * (v0 - v1);
+   *       }
+   *       else {
+   *         A_negative = false;
+   *         bezier_A = 6 * (v1 - v0);
+   *         bezier_B = 15 * (v1 - v0);
+   *         bezier_C = 10 * (v1 - v0);
+   *       }
+   *       bezier_F = v0;
+   *      }
+   *
+   *    And for each point, we will evaluate the curve with the following sequence:
+   *
+   *      // unsigned multiplication of 24 bits x 24bits, return upper 16 bits
+   *      void umul24x24to16hi(uint16_t& r, uint24_t op1, uint24_t op2) {
+   *        r = (uint64_t(op1) * op2) >> 8;
+   *      }
+   *      // unsigned multiplication of 16 bits x 16bits, return upper 16 bits
+   *      void umul16x16to16hi(uint16_t& r, uint16_t op1, uint16_t op2) {
+   *        r = (uint32_t(op1) * op2) >> 16;
+   *      }
+   *      // unsigned multiplication of 16 bits x 24bits, return upper 24 bits
+   *      void umul16x24to24hi(uint24_t& r, uint16_t op1, uint24_t op2) {
+   *        r = uint24_t((uint64_t(op1) * op2) >> 16);
+   *      }
+   *
+   *      int32_t _eval_bezier_curve(uint32_t curr_step) {
+   *        // To save computing, the first step is always the initial speed
+   *        if (!curr_step)
+   *          return bezier_F;
+   *
+   *        uint16_t t;
+   *        umul24x24to16hi(t, bezier_AV, curr_step);   // t: Range 0 - 1^16 = 16 bits
+   *        uint16_t f = t;
+   *        umul16x16to16hi(f, f, t);           // Range 16 bits (unsigned)
+   *        umul16x16to16hi(f, f, t);           // Range 16 bits : f = t^3  (unsigned)
+   *        uint24_t acc = bezier_F;          // Range 20 bits (unsigned)
+   *        if (A_negative) {
+   *          uint24_t v;
+   *          umul16x24to24hi(v, f, bezier_C);    // Range 21bits
+   *          acc -= v;
+   *          umul16x16to16hi(f, f, t);         // Range 16 bits : f = t^4  (unsigned)
+   *          umul16x24to24hi(v, f, bezier_B);    // Range 22bits
+   *          acc += v;
+   *          umul16x16to16hi(f, f, t);         // Range 16 bits : f = t^5  (unsigned)
+   *          umul16x24to24hi(v, f, bezier_A);    // Range 21bits + 15 = 36bits (plus sign)
+   *          acc -= v;
+   *        }
+   *        else {
+   *          uint24_t v;
+   *          umul16x24to24hi(v, f, bezier_C);    // Range 21bits
+   *          acc += v;
+   *          umul16x16to16hi(f, f, t);       // Range 16 bits : f = t^4  (unsigned)
+   *          umul16x24to24hi(v, f, bezier_B);    // Range 22bits
+   *          acc -= v;
+   *          umul16x16to16hi(f, f, t);               // Range 16 bits : f = t^5  (unsigned)
+   *          umul16x24to24hi(v, f, bezier_A);    // Range 21bits + 15 = 36bits (plus sign)
+   *          acc += v;
+   *        }
+   *        return acc;
+   *      }
+   *    These functions will be translated into assembler to get peak performance. Coefficient
+   *    calculation takes 70 cycles; Bézier point evaluation takes 150 cycles.
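+   *
+   *    Worked example (illustrative): for VI = 10000 and VF = 15000 steps/s the coefficient
+   *    calculation above stores
+   *      A = 30000, B = 75000, C = 50000, F = 10000, A_negative = false
+   *    and at the curve midpoint (t = 0.5, so t^3 = 0.125, t^4 = 0.0625, t^5 = 0.03125) the
+   *    evaluation yields
+   *      V = 10000 + 50000*0.125 - 75000*0.0625 + 30000*0.03125 = 12500 steps/s,
+   *    exactly halfway between VI and VF, as expected from the symmetric 5th-order blend (the
+   *    integer code reaches the same value up to fixed-point truncation).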
    *
-   *  muls  fhi,flo               | f = AV * CS       1 cycles
-   *  mov   t,fhi                 | t = AV * CS       1 cycles
-   *  lsrs  ahi,alo,#1            | a  = F << 31      1 cycles
-   *  lsls  alo,alo,#31           |                   1 cycles
-   *  umull flo,fhi,fhi,t         | f *= t            5 cycles [fhi:flo=64bits
-   *  umull flo,fhi,fhi,t         | f>>=32; f*=t      5 cycles [fhi:flo=64bits
-   *  lsrs  flo,fhi,#1            |                   1 cycles [31bits
-   *  smlal alo,ahi,flo,C         | a+=(f>>33)*C;     5 cycles
-   *  umull flo,fhi,fhi,t         | f>>=32; f*=t      5 cycles [fhi:flo=64bits
-   *  lsrs  flo,fhi,#1            |                   1 cycles [31bits
-   *  smlal alo,ahi,flo,B         | a+=(f>>33)*B;     5 cycles
-   *  umull flo,fhi,fhi,t         | f>>=32; f*=t      5 cycles [fhi:flo=64bits
-   *  lsrs  flo,fhi,#1            | f>>=33;           1 cycles [31bits
-   *  smlal alo,ahi,flo,A         | a+=(f>>33)*A;     5 cycles
-   *  lsrs  alo,ahi,#6            | a>>=38            1 cycles
-   *  43 cycles total
    */
 
-  FORCE_INLINE void Stepper::_calc_bezier_curve_coeffs(const int32_t v0, const int32_t v1, const uint32_t interval) {
-    // Calculate the Bézier coefficients
-    bezier_A =  768 * (v1 - v0);
-    bezier_B = 1920 * (v0 - v1);
-    bezier_C = 1280 * (v1 - v0);
-    bezier_F =  128 * v0;
-    bezier_AV = 0xFFFFFFFF / interval;
-  }
+  #ifdef __AVR__
 
-  FORCE_INLINE int32_t Stepper::_eval_bezier_curve(const uint32_t curr_step) {
-    #if defined(__ARM__) || defined(__thumb__)
+    // For AVR we use assembly to maximize speed
+    void Stepper::_calc_bezier_curve_coeffs(const int32_t v0, const int32_t v1, const uint32_t av) {
 
-      // For ARM CORTEX M3/M4 CPUs, we have the optimized assembler version, that takes 43 cycles to execute
-      register uint32_t flo = 0;
-      register uint32_t fhi = bezier_AV * curr_step;
-      register uint32_t t = fhi;
-      register int32_t alo = bezier_F;
-      register int32_t ahi = 0;
-      register int32_t A = bezier_A;
-      register int32_t B = bezier_B;
-      register int32_t C = bezier_C;
+      // Store advance
+      bezier_AV = av;
 
-       __asm__ __volatile__(
-        ".syntax unified"                   "\n\t"  // is to prevent CM0,CM1 non-unified syntax
-        " lsrs  %[ahi],%[alo],#1"           "\n\t"  // a  = F << 31      1 cycles
-        " lsls  %[alo],%[alo],#31"          "\n\t"  //                   1 cycles
-        " umull %[flo],%[fhi],%[fhi],%[t]"  "\n\t"  // f *= t            5 cycles [fhi:flo=64bits]
-        " umull %[flo],%[fhi],%[fhi],%[t]"  "\n\t"  // f>>=32; f*=t      5 cycles [fhi:flo=64bits]
-        " lsrs  %[flo],%[fhi],#1"           "\n\t"  //                   1 cycles [31bits]
-        " smlal %[alo],%[ahi],%[flo],%[C]"  "\n\t"  // a+=(f>>33)*C;     5 cycles
-        " umull %[flo],%[fhi],%[fhi],%[t]"  "\n\t"  // f>>=32; f*=t      5 cycles [fhi:flo=64bits]
-        " lsrs  %[flo],%[fhi],#1"           "\n\t"  //                   1 cycles [31bits]
-        " smlal %[alo],%[ahi],%[flo],%[B]"  "\n\t"  // a+=(f>>33)*B;     5 cycles
-        " umull %[flo],%[fhi],%[fhi],%[t]"  "\n\t"  // f>>=32; f*=t      5 cycles [fhi:flo=64bits]
-        " lsrs  %[flo],%[fhi],#1"           "\n\t"  // f>>=33;           1 cycles [31bits]
-        " smlal %[alo],%[ahi],%[flo],%[A]"  "\n\t"  // a+=(f>>33)*A;     5 cycles
-        " lsrs  %[alo],%[ahi],#6"           "\n\t"  // a>>=38            1 cycles
-        : [alo]"+r"( alo ) ,
-          [flo]"+r"( flo ) ,
-          [fhi]"+r"( fhi ) ,
-          [ahi]"+r"( ahi ) ,
-          [A]"+r"( A ) ,  // <== Note: Even if A, B, C, and t registers are INPUT ONLY
-          [B]"+r"( B ) ,  //  GCC does bad optimizations on the code if we list them as
-          [C]"+r"( C ) ,  //  such, breaking this function. So, to avoid that problem,
-          [t]"+r"( t )    //  we list all registers as input-outputs.
+      // Calculate the rest of the coefficients
+      register uint8_t r2 = v0 & 0xFF;
+      register uint8_t r3 = (v0 >> 8) & 0xFF;
+      register uint8_t r12 = (v0 >> 16) & 0xFF;
+      register uint8_t r5 = v1 & 0xFF;
+      register uint8_t r6 = (v1 >> 8) & 0xFF;
+      register uint8_t r7 = (v1 >> 16) & 0xFF;
+      register uint8_t r4,r8,r9,r10,r11;
+
+      __asm__ __volatile__(
+        /* Calculate the Bézier coefficients */
+        /*  %10:%1:%0 = v0*/
+        /*  %5:%4:%3 = v1*/
+        /*  %7:%6:%10 = temporary*/
+        /*  %9 = val (must be high register!)*/
+        /*  %10 (must be high register!)*/
+
+        /* Store initial velocity*/
+        " sts bezier_F, %0" "\n\t"
+        " sts bezier_F+1, %1" "\n\t"
+        " sts bezier_F+2, %10" "\n\t"    /* bezier_F = %10:%1:%0 = v0 */
+
+        /* Get delta speed */
+        " ldi %2,-1" "\n\t"              /* %2 = 0xff, means A_negative = true */
+        " clr %8" "\n\t"                 /* %8 = 0 */
+        " sub %0,%3" "\n\t"
+        " sbc %1,%4" "\n\t"
+        " sbc %10,%5" "\n\t"             /*  v0 -= v1, C=1 if result is negative */
+        " brcc 1f" "\n\t"                /* branch if result is positive (C=0), that means v0 >= v1 */
+
+        /*  Result was negative, get the absolute value*/
+        " com %10" "\n\t"
+        " com %1" "\n\t"
+        " neg %0" "\n\t"
+        " sbc %1,%2" "\n\t"
+        " sbc %10,%2" "\n\t"             /* %10:%1:%0 +1  -> %10:%1:%0 = -(v0 - v1) = (v1 - v0) */
+        " clr %2" "\n\t"                 /* %2 = 0, means A_negative = false */
+
+        /*  Store negative flag*/
+        "1:" "\n\t"
+        " sts A_negative, %2" "\n\t"     /* Store negative flag */
+
+        /*  Compute coefficients A,B and C   [20 cycles worst case]*/
+        " ldi %9,6" "\n\t"               /* %9 = 6 */
+        " mul %0,%9" "\n\t"              /* r1:r0 = 6*LO(v0-v1) */
+        " sts bezier_A, r0" "\n\t"
+        " mov %6,r1" "\n\t"
+        " clr %7" "\n\t"                 /* %7:%6:r0 = 6*LO(v0-v1) */
+        " mul %1,%9" "\n\t"              /* r1:r0 = 6*MI(v0-v1) */
+        " add %6,r0" "\n\t"
+        " adc %7,r1" "\n\t"              /* %7:%6:?? += 6*MI(v0-v1) << 8 */
+        " mul %10,%9" "\n\t"             /* r1:r0 = 6*HI(v0-v1) */
+        " add %7,r0" "\n\t"              /* %7:%6:?? += 6*HI(v0-v1) << 16 */
+        " sts bezier_A+1, %6" "\n\t"
+        " sts bezier_A+2, %7" "\n\t"     /* bezier_A = %7:%6:?? = 6*(v0-v1) [35 cycles worst] */
+
+        " ldi %9,15" "\n\t"              /* %9 = 15 */
+        " mul %0,%9" "\n\t"              /* r1:r0 = 5*LO(v0-v1) */
+        " sts bezier_B, r0" "\n\t"
+        " mov %6,r1" "\n\t"
+        " clr %7" "\n\t"                 /* %7:%6:?? = 5*LO(v0-v1) */
+        " mul %1,%9" "\n\t"              /* r1:r0 = 5*MI(v0-v1) */
+        " add %6,r0" "\n\t"
+        " adc %7,r1" "\n\t"              /* %7:%6:?? += 5*MI(v0-v1) << 8 */
+        " mul %10,%9" "\n\t"             /* r1:r0 = 5*HI(v0-v1) */
+        " add %7,r0" "\n\t"              /* %7:%6:?? += 5*HI(v0-v1) << 16 */
+        " sts bezier_B+1, %6" "\n\t"
+        " sts bezier_B+2, %7" "\n\t"     /* bezier_B = %7:%6:?? = 5*(v0-v1) [50 cycles worst] */
+
+        " ldi %9,10" "\n\t"              /* %9 = 10 */
+        " mul %0,%9" "\n\t"              /* r1:r0 = 10*LO(v0-v1) */
+        " sts bezier_C, r0" "\n\t"
+        " mov %6,r1" "\n\t"
+        " clr %7" "\n\t"                 /* %7:%6:?? = 10*LO(v0-v1) */
+        " mul %1,%9" "\n\t"              /* r1:r0 = 10*MI(v0-v1) */
+        " add %6,r0" "\n\t"
+        " adc %7,r1" "\n\t"              /* %7:%6:?? += 10*MI(v0-v1) << 8 */
+        " mul %10,%9" "\n\t"             /* r1:r0 = 10*HI(v0-v1) */
+        " add %7,r0" "\n\t"              /* %7:%6:?? += 10*HI(v0-v1) << 16 */
+        " sts bezier_C+1, %6" "\n\t"
+        " sts bezier_C+2, %7"            /* bezier_C = %7:%6:?? = 10*(v0-v1) [65 cycles worst] */
+        : "+r" (r2),
+          "+d" (r3),
+          "=r" (r4),
+          "+r" (r5),
+          "+r" (r6),
+          "+r" (r7),
+          "=r" (r8),
+          "=r" (r9),
+          "=r" (r10),
+          "=d" (r11),
+          "+r" (r12)
         :
-        : "cc"
+        : "r0", "r1", "cc", "memory"
       );
-      return alo;
+    }
 
-    #else
+    FORCE_INLINE int32_t Stepper::_eval_bezier_curve(const uint32_t curr_step) {
 
-      // For non ARM targets, we provide a fallback implementation. Really doubt it
-      // will be useful, unless the processor is extremely fast.
+      // If dealing with the first step, save expensive computing and return the initial speed
+      if (!curr_step)
+        return bezier_F;
 
-      uint32_t t = bezier_AV * curr_step;               // t: Range 0 - 1^32 = 32 bits
-      uint64_t f = t;
-      f *= t;                                           // Range 32*2 = 64 bits (unsigned)
-      f >>= 32;                                         // Range 32 bits  (unsigned)
-      f *= t;                                           // Range 32*2 = 64 bits  (unsigned)
-      f >>= 32;                                         // Range 32 bits : f = t^3  (unsigned)
-      int64_t acc = (int64_t) bezier_F << 31;           // Range 63 bits (signed)
-      acc += ((uint32_t) f >> 1) * (int64_t) bezier_C;  // Range 29bits + 31 = 60bits (plus sign)
-      f *= t;                                           // Range 32*2 = 64 bits
-      f >>= 32;                                         // Range 32 bits : f = t^3  (unsigned)
-      acc += ((uint32_t) f >> 1) * (int64_t) bezier_B;  // Range 29bits + 31 = 60bits (plus sign)
-      f *= t;                                           // Range 32*2 = 64 bits
-      f >>= 32;                                         // Range 32 bits : f = t^3  (unsigned)
-      acc += ((uint32_t) f >> 1) * (int64_t) bezier_A;  // Range 28bits + 31 = 59bits (plus sign)
-      acc >>= (31 + 7);                                 // Range 24bits (plus sign)
-      return (int32_t) acc;
+      register uint8_t r0 = 0; /* Zero register */
+      register uint8_t r2 = (curr_step) & 0xFF;
+      register uint8_t r3 = (curr_step >> 8) & 0xFF;
+      register uint8_t r4 = (curr_step >> 16) & 0xFF;
+      register uint8_t r1,r5,r6,r7,r8,r9,r10,r11; /* Temporary registers */
 
-    #endif
-  }
+      __asm__ __volatile(
+        /* umul24x24to16hi(t, bezier_AV, curr_step);  t: Range 0 - 1^16 = 16 bits*/
+        " lds %9,bezier_AV" "\n\t"       /* %9 = LO(AV)*/
+        " mul %9,%2" "\n\t"              /* r1:r0 = LO(bezier_AV)*LO(curr_step)*/
+        " mov %7,r1" "\n\t"              /* %7 = LO(bezier_AV)*LO(curr_step) >> 8*/
+        " clr %8" "\n\t"                 /* %8:%7  = LO(bezier_AV)*LO(curr_step) >> 8*/
+        " lds %10,bezier_AV+1" "\n\t"    /* %10 = MI(AV)*/
+        " mul %10,%2" "\n\t"             /* r1:r0  = MI(bezier_AV)*LO(curr_step)*/
+        " add %7,r0" "\n\t"
+        " adc %8,r1" "\n\t"              /* %8:%7 += MI(bezier_AV)*LO(curr_step)*/
+        " lds r1,bezier_AV+2" "\n\t"     /* r11 = HI(AV)*/
+        " mul r1,%2" "\n\t"              /* r1:r0  = HI(bezier_AV)*LO(curr_step)*/
+        " add %8,r0" "\n\t"              /* %8:%7 += HI(bezier_AV)*LO(curr_step) << 8*/
+        " mul %9,%3" "\n\t"              /* r1:r0 =  LO(bezier_AV)*MI(curr_step)*/
+        " add %7,r0" "\n\t"
+        " adc %8,r1" "\n\t"              /* %8:%7 += LO(bezier_AV)*MI(curr_step)*/
+        " mul %10,%3" "\n\t"             /* r1:r0 =  MI(bezier_AV)*MI(curr_step)*/
+        " add %8,r0" "\n\t"              /* %8:%7 += LO(bezier_AV)*MI(curr_step) << 8*/
+        " mul %9,%4" "\n\t"              /* r1:r0 =  LO(bezier_AV)*HI(curr_step)*/
+        " add %8,r0" "\n\t"              /* %8:%7 += LO(bezier_AV)*HI(curr_step) << 8*/
+        /* %8:%7 = t*/
 
+        /* uint16_t f = t;*/
+        " mov %5,%7" "\n\t"              /* %6:%5 = f*/
+        " mov %6,%8" "\n\t"
+        /* %6:%5 = f*/
+
+        /* umul16x16to16hi(f, f, t); / Range 16 bits (unsigned) [17] */
+        " mul %5,%7" "\n\t"              /* r1:r0 = LO(f) * LO(t)*/
+        " mov %9,r1" "\n\t"              /* store MIL(LO(f) * LO(t)) in %9, we need it for rounding*/
+        " clr %10" "\n\t"                /* %10 = 0*/
+        " clr %11" "\n\t"                /* %11 = 0*/
+        " mul %5,%8" "\n\t"              /* r1:r0 = LO(f) * HI(t)*/
+        " add %9,r0" "\n\t"              /* %9 += LO(LO(f) * HI(t))*/
+        " adc %10,r1" "\n\t"             /* %10 = HI(LO(f) * HI(t))*/
+        " adc %11,%0" "\n\t"             /* %11 += carry*/
+        " mul %6,%7" "\n\t"              /* r1:r0 = HI(f) * LO(t)*/
+        " add %9,r0" "\n\t"              /* %9 += LO(HI(f) * LO(t))*/
+        " adc %10,r1" "\n\t"             /* %10 += HI(HI(f) * LO(t)) */
+        " adc %11,%0" "\n\t"             /* %11 += carry*/
+        " mul %6,%8" "\n\t"              /* r1:r0 = HI(f) * HI(t)*/
+        " add %10,r0" "\n\t"             /* %10 += LO(HI(f) * HI(t))*/
+        " adc %11,r1" "\n\t"             /* %11 += HI(HI(f) * HI(t))*/
+        " mov %5,%10" "\n\t"             /* %6:%5 = */
+        " mov %6,%11" "\n\t"             /* f = %10:%11*/
+
+        /* umul16x16to16hi(f, f, t); / Range 16 bits : f = t^3  (unsigned) [17]*/
+        " mul %5,%7" "\n\t"              /* r1:r0 = LO(f) * LO(t)*/
+        " mov %1,r1" "\n\t"              /* store MIL(LO(f) * LO(t)) in %1, we need it for rounding*/
+        " clr %10" "\n\t"                /* %10 = 0*/
+        " clr %11" "\n\t"                /* %11 = 0*/
+        " mul %5,%8" "\n\t"              /* r1:r0 = LO(f) * HI(t)*/
+        " add %1,r0" "\n\t"              /* %1 += LO(LO(f) * HI(t))*/
+        " adc %10,r1" "\n\t"             /* %10 = HI(LO(f) * HI(t))*/
+        " adc %11,%0" "\n\t"             /* %11 += carry*/
+        " mul %6,%7" "\n\t"              /* r1:r0 = HI(f) * LO(t)*/
+        " add %1,r0" "\n\t"              /* %1 += LO(HI(f) * LO(t))*/
+        " adc %10,r1" "\n\t"             /* %10 += HI(HI(f) * LO(t))*/
+        " adc %11,%0" "\n\t"             /* %11 += carry*/
+        " mul %6,%8" "\n\t"              /* r1:r0 = HI(f) * HI(t)*/
+        " add %10,r0" "\n\t"             /* %10 += LO(HI(f) * HI(t))*/
+        " adc %11,r1" "\n\t"             /* %11 += HI(HI(f) * HI(t))*/
+        " mov %5,%10" "\n\t"             /* %6:%5 =*/
+        " mov %6,%11" "\n\t"             /* f = %10:%11*/
+        /* [15 +17*2] = [49]*/
+
+        /* %4:%3:%2 will be acc from now on*/
+
+        /* uint24_t acc = bezier_F; / Range 20 bits (unsigned)*/
+        " clr %9" "\n\t"                 /* "decimal place we get for free"*/
+        " lds %2,bezier_F" "\n\t"
+        " lds %3,bezier_F+1" "\n\t"
+        " lds %4,bezier_F+2" "\n\t"      /* %4:%3:%2 = acc*/
+
+        /* if (A_negative) {*/
+        " lds r0,A_negative" "\n\t"
+        " or r0,%0" "\n\t"               /* Is flag signalling negative? */
+        " brne 3f" "\n\t"                /* If yes, Skip next instruction if A was negative*/
+        " rjmp 1f" "\n\t"                /* Otherwise, jump */
+
+        /* uint24_t v; */
+        /* umul16x24to24hi(v, f, bezier_C); / Range 21bits [29] */
+        /* acc -= v; */
+        "3:" "\n\t"
+        " lds %10, bezier_C" "\n\t"      /* %10 = LO(bezier_C)*/
+        " mul %10,%5" "\n\t"             /* r1:r0 = LO(bezier_C) * LO(f)*/
+        " sub %9,r1" "\n\t"
+        " sbc %2,%0" "\n\t"
+        " sbc %3,%0" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= HI(LO(bezier_C) * LO(f))*/
+        " lds %11, bezier_C+1" "\n\t"    /* %11 = MI(bezier_C)*/
+        " mul %11,%5" "\n\t"             /* r1:r0 = MI(bezier_C) * LO(f)*/
+        " sub %9,r0" "\n\t"
+        " sbc %2,r1" "\n\t"
+        " sbc %3,%0" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= MI(bezier_C) * LO(f)*/
+        " lds %1, bezier_C+2" "\n\t"     /* %1 = HI(bezier_C)*/
+        " mul %1,%5" "\n\t"              /* r1:r0 = MI(bezier_C) * LO(f)*/
+        " sub %2,r0" "\n\t"
+        " sbc %3,r1" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= HI(bezier_C) * LO(f) << 8*/
+        " mul %10,%6" "\n\t"             /* r1:r0 = LO(bezier_C) * MI(f)*/
+        " sub %9,r0" "\n\t"
+        " sbc %2,r1" "\n\t"
+        " sbc %3,%0" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= LO(bezier_C) * MI(f)*/
+        " mul %11,%6" "\n\t"             /* r1:r0 = MI(bezier_C) * MI(f)*/
+        " sub %2,r0" "\n\t"
+        " sbc %3,r1" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= MI(bezier_C) * MI(f) << 8*/
+        " mul %1,%6" "\n\t"              /* r1:r0 = HI(bezier_C) * LO(f)*/
+        " sub %3,r0" "\n\t"
+        " sbc %4,r1" "\n\t"              /* %4:%3:%2:%9 -= HI(bezier_C) * LO(f) << 16*/
+
+        /* umul16x16to16hi(f, f, t); / Range 16 bits : f = t^4  (unsigned) [17]*/
+        " mul %5,%7" "\n\t"              /* r1:r0 = LO(f) * LO(t)*/
+        " mov %1,r1" "\n\t"              /* store MIL(LO(f) * LO(t)) in %1, we need it for rounding*/
+        " clr %10" "\n\t"                /* %10 = 0*/
+        " clr %11" "\n\t"                /* %11 = 0*/
+        " mul %5,%8" "\n\t"              /* r1:r0 = LO(f) * HI(t)*/
+        " add %1,r0" "\n\t"              /* %1 += LO(LO(f) * HI(t))*/
+        " adc %10,r1" "\n\t"             /* %10 = HI(LO(f) * HI(t))*/
+        " adc %11,%0" "\n\t"             /* %11 += carry*/
+        " mul %6,%7" "\n\t"              /* r1:r0 = HI(f) * LO(t)*/
+        " add %1,r0" "\n\t"              /* %1 += LO(HI(f) * LO(t))*/
+        " adc %10,r1" "\n\t"             /* %10 += HI(HI(f) * LO(t))*/
+        " adc %11,%0" "\n\t"             /* %11 += carry*/
+        " mul %6,%8" "\n\t"              /* r1:r0 = HI(f) * HI(t)*/
+        " add %10,r0" "\n\t"             /* %10 += LO(HI(f) * HI(t))*/
+        " adc %11,r1" "\n\t"             /* %11 += HI(HI(f) * HI(t))*/
+        " mov %5,%10" "\n\t"             /* %6:%5 =*/
+        " mov %6,%11" "\n\t"             /* f = %10:%11*/
+
+        /* umul16x24to24hi(v, f, bezier_B); / Range 22bits [29]*/
+        /* acc += v; */
+        " lds %10, bezier_B" "\n\t"      /* %10 = LO(bezier_B)*/
+        " mul %10,%5" "\n\t"             /* r1:r0 = LO(bezier_B) * LO(f)*/
+        " add %9,r1" "\n\t"
+        " adc %2,%0" "\n\t"
+        " adc %3,%0" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += HI(LO(bezier_B) * LO(f))*/
+        " lds %11, bezier_B+1" "\n\t"    /* %11 = MI(bezier_B)*/
+        " mul %11,%5" "\n\t"             /* r1:r0 = MI(bezier_B) * LO(f)*/
+        " add %9,r0" "\n\t"
+        " adc %2,r1" "\n\t"
+        " adc %3,%0" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += MI(bezier_B) * LO(f)*/
+        " lds %1, bezier_B+2" "\n\t"     /* %1 = HI(bezier_B)*/
+        " mul %1,%5" "\n\t"              /* r1:r0 = MI(bezier_B) * LO(f)*/
+        " add %2,r0" "\n\t"
+        " adc %3,r1" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += HI(bezier_B) * LO(f) << 8*/
+        " mul %10,%6" "\n\t"             /* r1:r0 = LO(bezier_B) * MI(f)*/
+        " add %9,r0" "\n\t"
+        " adc %2,r1" "\n\t"
+        " adc %3,%0" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += LO(bezier_B) * MI(f)*/
+        " mul %11,%6" "\n\t"             /* r1:r0 = MI(bezier_B) * MI(f)*/
+        " add %2,r0" "\n\t"
+        " adc %3,r1" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += MI(bezier_B) * MI(f) << 8*/
+        " mul %1,%6" "\n\t"              /* r1:r0 = HI(bezier_B) * LO(f)*/
+        " add %3,r0" "\n\t"
+        " adc %4,r1" "\n\t"              /* %4:%3:%2:%9 += HI(bezier_B) * LO(f) << 16*/
+
+        /* umul16x16to16hi(f, f, t); / Range 16 bits : f = t^5  (unsigned) [17]*/
+        " mul %5,%7" "\n\t"              /* r1:r0 = LO(f) * LO(t)*/
+        " mov %1,r1" "\n\t"              /* store MIL(LO(f) * LO(t)) in %1, we need it for rounding*/
+        " clr %10" "\n\t"                /* %10 = 0*/
+        " clr %11" "\n\t"                /* %11 = 0*/
+        " mul %5,%8" "\n\t"              /* r1:r0 = LO(f) * HI(t)*/
+        " add %1,r0" "\n\t"              /* %1 += LO(LO(f) * HI(t))*/
+        " adc %10,r1" "\n\t"             /* %10 = HI(LO(f) * HI(t))*/
+        " adc %11,%0" "\n\t"             /* %11 += carry*/
+        " mul %6,%7" "\n\t"              /* r1:r0 = HI(f) * LO(t)*/
+        " add %1,r0" "\n\t"              /* %1 += LO(HI(f) * LO(t))*/
+        " adc %10,r1" "\n\t"             /* %10 += HI(HI(f) * LO(t))*/
+        " adc %11,%0" "\n\t"             /* %11 += carry*/
+        " mul %6,%8" "\n\t"              /* r1:r0 = HI(f) * HI(t)*/
+        " add %10,r0" "\n\t"             /* %10 += LO(HI(f) * HI(t))*/
+        " adc %11,r1" "\n\t"             /* %11 += HI(HI(f) * HI(t))*/
+        " mov %5,%10" "\n\t"             /* %6:%5 =*/
+        " mov %6,%11" "\n\t"             /* f = %10:%11*/
+
+        /* umul16x24to24hi(v, f, bezier_A); / Range 21bits [29]*/
+        /* acc -= v; */
+        " lds %10, bezier_A" "\n\t"      /* %10 = LO(bezier_A)*/
+        " mul %10,%5" "\n\t"             /* r1:r0 = LO(bezier_A) * LO(f)*/
+        " sub %9,r1" "\n\t"
+        " sbc %2,%0" "\n\t"
+        " sbc %3,%0" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= HI(LO(bezier_A) * LO(f))*/
+        " lds %11, bezier_A+1" "\n\t"    /* %11 = MI(bezier_A)*/
+        " mul %11,%5" "\n\t"             /* r1:r0 = MI(bezier_A) * LO(f)*/
+        " sub %9,r0" "\n\t"
+        " sbc %2,r1" "\n\t"
+        " sbc %3,%0" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= MI(bezier_A) * LO(f)*/
+        " lds %1, bezier_A+2" "\n\t"     /* %1 = HI(bezier_A)*/
+        " mul %1,%5" "\n\t"              /* r1:r0 = MI(bezier_A) * LO(f)*/
+        " sub %2,r0" "\n\t"
+        " sbc %3,r1" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= HI(bezier_A) * LO(f) << 8*/
+        " mul %10,%6" "\n\t"             /* r1:r0 = LO(bezier_A) * MI(f)*/
+        " sub %9,r0" "\n\t"
+        " sbc %2,r1" "\n\t"
+        " sbc %3,%0" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= LO(bezier_A) * MI(f)*/
+        " mul %11,%6" "\n\t"             /* r1:r0 = MI(bezier_A) * MI(f)*/
+        " sub %2,r0" "\n\t"
+        " sbc %3,r1" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= MI(bezier_A) * MI(f) << 8*/
+        " mul %1,%6" "\n\t"              /* r1:r0 = HI(bezier_A) * LO(f)*/
+        " sub %3,r0" "\n\t"
+        " sbc %4,r1" "\n\t"              /* %4:%3:%2:%9 -= HI(bezier_A) * LO(f) << 16*/
+        " jmp 2f" "\n\t"                 /* Done!*/
+
+        "1:" "\n\t"
+
+        /* uint24_t v; */
+        /* umul16x24to24hi(v, f, bezier_C); / Range 21bits [29]*/
+        /* acc += v; */
+        " lds %10, bezier_C" "\n\t"      /* %10 = LO(bezier_C)*/
+        " mul %10,%5" "\n\t"             /* r1:r0 = LO(bezier_C) * LO(f)*/
+        " add %9,r1" "\n\t"
+        " adc %2,%0" "\n\t"
+        " adc %3,%0" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += HI(LO(bezier_C) * LO(f))*/
+        " lds %11, bezier_C+1" "\n\t"    /* %11 = MI(bezier_C)*/
+        " mul %11,%5" "\n\t"             /* r1:r0 = MI(bezier_C) * LO(f)*/
+        " add %9,r0" "\n\t"
+        " adc %2,r1" "\n\t"
+        " adc %3,%0" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += MI(bezier_C) * LO(f)*/
+        " lds %1, bezier_C+2" "\n\t"     /* %1 = HI(bezier_C)*/
+        " mul %1,%5" "\n\t"              /* r1:r0 = MI(bezier_C) * LO(f)*/
+        " add %2,r0" "\n\t"
+        " adc %3,r1" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += HI(bezier_C) * LO(f) << 8*/
+        " mul %10,%6" "\n\t"             /* r1:r0 = LO(bezier_C) * MI(f)*/
+        " add %9,r0" "\n\t"
+        " adc %2,r1" "\n\t"
+        " adc %3,%0" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += LO(bezier_C) * MI(f)*/
+        " mul %11,%6" "\n\t"             /* r1:r0 = MI(bezier_C) * MI(f)*/
+        " add %2,r0" "\n\t"
+        " adc %3,r1" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += MI(bezier_C) * MI(f) << 8*/
+        " mul %1,%6" "\n\t"              /* r1:r0 = HI(bezier_C) * LO(f)*/
+        " add %3,r0" "\n\t"
+        " adc %4,r1" "\n\t"              /* %4:%3:%2:%9 += HI(bezier_C) * LO(f) << 16*/
+
+        /* umul16x16to16hi(f, f, t); / Range 16 bits : f = t^4  (unsigned) [17]*/
+        " mul %5,%7" "\n\t"              /* r1:r0 = LO(f) * LO(t)*/
+        " mov %1,r1" "\n\t"              /* store MIL(LO(f) * LO(t)) in %1, we need it for rounding*/
+        " clr %10" "\n\t"                /* %10 = 0*/
+        " clr %11" "\n\t"                /* %11 = 0*/
+        " mul %5,%8" "\n\t"              /* r1:r0 = LO(f) * HI(t)*/
+        " add %1,r0" "\n\t"              /* %1 += LO(LO(f) * HI(t))*/
+        " adc %10,r1" "\n\t"             /* %10 = HI(LO(f) * HI(t))*/
+        " adc %11,%0" "\n\t"             /* %11 += carry*/
+        " mul %6,%7" "\n\t"              /* r1:r0 = HI(f) * LO(t)*/
+        " add %1,r0" "\n\t"              /* %1 += LO(HI(f) * LO(t))*/
+        " adc %10,r1" "\n\t"             /* %10 += HI(HI(f) * LO(t))*/
+        " adc %11,%0" "\n\t"             /* %11 += carry*/
+        " mul %6,%8" "\n\t"              /* r1:r0 = HI(f) * HI(t)*/
+        " add %10,r0" "\n\t"             /* %10 += LO(HI(f) * HI(t))*/
+        " adc %11,r1" "\n\t"             /* %11 += HI(HI(f) * HI(t))*/
+        " mov %5,%10" "\n\t"             /* %6:%5 =*/
+        " mov %6,%11" "\n\t"             /* f = %10:%11*/
+
+        /* umul16x24to24hi(v, f, bezier_B); / Range 22bits [29]*/
+        /* acc -= v;*/
+        " lds %10, bezier_B" "\n\t"      /* %10 = LO(bezier_B)*/
+        " mul %10,%5" "\n\t"             /* r1:r0 = LO(bezier_B) * LO(f)*/
+        " sub %9,r1" "\n\t"
+        " sbc %2,%0" "\n\t"
+        " sbc %3,%0" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= HI(LO(bezier_B) * LO(f))*/
+        " lds %11, bezier_B+1" "\n\t"    /* %11 = MI(bezier_B)*/
+        " mul %11,%5" "\n\t"             /* r1:r0 = MI(bezier_B) * LO(f)*/
+        " sub %9,r0" "\n\t"
+        " sbc %2,r1" "\n\t"
+        " sbc %3,%0" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= MI(bezier_B) * LO(f)*/
+        " lds %1, bezier_B+2" "\n\t"     /* %1 = HI(bezier_B)*/
+        " mul %1,%5" "\n\t"              /* r1:r0 = MI(bezier_B) * LO(f)*/
+        " sub %2,r0" "\n\t"
+        " sbc %3,r1" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= HI(bezier_B) * LO(f) << 8*/
+        " mul %10,%6" "\n\t"             /* r1:r0 = LO(bezier_B) * MI(f)*/
+        " sub %9,r0" "\n\t"
+        " sbc %2,r1" "\n\t"
+        " sbc %3,%0" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= LO(bezier_B) * MI(f)*/
+        " mul %11,%6" "\n\t"             /* r1:r0 = MI(bezier_B) * MI(f)*/
+        " sub %2,r0" "\n\t"
+        " sbc %3,r1" "\n\t"
+        " sbc %4,%0" "\n\t"              /* %4:%3:%2:%9 -= MI(bezier_B) * MI(f) << 8*/
+        " mul %1,%6" "\n\t"              /* r1:r0 = HI(bezier_B) * LO(f)*/
+        " sub %3,r0" "\n\t"
+        " sbc %4,r1" "\n\t"              /* %4:%3:%2:%9 -= HI(bezier_B) * LO(f) << 16*/
+
+        /* umul16x16to16hi(f, f, t); / Range 16 bits : f = t^5  (unsigned) [17]*/
+        " mul %5,%7" "\n\t"              /* r1:r0 = LO(f) * LO(t)*/
+        " mov %1,r1" "\n\t"              /* store MIL(LO(f) * LO(t)) in %1, we need it for rounding*/
+        " clr %10" "\n\t"                /* %10 = 0*/
+        " clr %11" "\n\t"                /* %11 = 0*/
+        " mul %5,%8" "\n\t"              /* r1:r0 = LO(f) * HI(t)*/
+        " add %1,r0" "\n\t"              /* %1 += LO(LO(f) * HI(t))*/
+        " adc %10,r1" "\n\t"             /* %10 = HI(LO(f) * HI(t))*/
+        " adc %11,%0" "\n\t"             /* %11 += carry*/
+        " mul %6,%7" "\n\t"              /* r1:r0 = HI(f) * LO(t)*/
+        " add %1,r0" "\n\t"              /* %1 += LO(HI(f) * LO(t))*/
+        " adc %10,r1" "\n\t"             /* %10 += HI(HI(f) * LO(t))*/
+        " adc %11,%0" "\n\t"             /* %11 += carry*/
+        " mul %6,%8" "\n\t"              /* r1:r0 = HI(f) * HI(t)*/
+        " add %10,r0" "\n\t"             /* %10 += LO(HI(f) * HI(t))*/
+        " adc %11,r1" "\n\t"             /* %11 += HI(HI(f) * HI(t))*/
+        " mov %5,%10" "\n\t"             /* %6:%5 =*/
+        " mov %6,%11" "\n\t"             /* f = %10:%11*/
+
+        /* umul16x24to24hi(v, f, bezier_A); / Range 21bits [29]*/
+        /* acc += v; */
+        " lds %10, bezier_A" "\n\t"      /* %10 = LO(bezier_A)*/
+        " mul %10,%5" "\n\t"             /* r1:r0 = LO(bezier_A) * LO(f)*/
+        " add %9,r1" "\n\t"
+        " adc %2,%0" "\n\t"
+        " adc %3,%0" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += HI(LO(bezier_A) * LO(f))*/
+        " lds %11, bezier_A+1" "\n\t"    /* %11 = MI(bezier_A)*/
+        " mul %11,%5" "\n\t"             /* r1:r0 = MI(bezier_A) * LO(f)*/
+        " add %9,r0" "\n\t"
+        " adc %2,r1" "\n\t"
+        " adc %3,%0" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += MI(bezier_A) * LO(f)*/
+        " lds %1, bezier_A+2" "\n\t"     /* %1 = HI(bezier_A)*/
+        " mul %1,%5" "\n\t"              /* r1:r0 = MI(bezier_A) * LO(f)*/
+        " add %2,r0" "\n\t"
+        " adc %3,r1" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += HI(bezier_A) * LO(f) << 8*/
+        " mul %10,%6" "\n\t"             /* r1:r0 = LO(bezier_A) * MI(f)*/
+        " add %9,r0" "\n\t"
+        " adc %2,r1" "\n\t"
+        " adc %3,%0" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += LO(bezier_A) * MI(f)*/
+        " mul %11,%6" "\n\t"             /* r1:r0 = MI(bezier_A) * MI(f)*/
+        " add %2,r0" "\n\t"
+        " adc %3,r1" "\n\t"
+        " adc %4,%0" "\n\t"              /* %4:%3:%2:%9 += MI(bezier_A) * MI(f) << 8*/
+        " mul %1,%6" "\n\t"              /* r1:r0 = HI(bezier_A) * LO(f)*/
+        " add %3,r0" "\n\t"
+        " adc %4,r1" "\n\t"              /* %4:%3:%2:%9 += HI(bezier_A) * LO(f) << 16*/
+        "2:" "\n\t"
+        " clr __zero_reg__"              /* C runtime expects r1 = __zero_reg__ = 0 */
+        : "+r"(r0),
+          "+r"(r1),
+          "+r"(r2),
+          "+r"(r3),
+          "+r"(r4),
+          "+r"(r5),
+          "+r"(r6),
+          "+r"(r7),
+          "+r"(r8),
+          "+r"(r9),
+          "+r"(r10),
+          "+r"(r11)
+        :
+        :"cc","r0","r1"
+      );
+      return (r2 | (uint16_t(r3) << 8)) | (uint32_t(r4) << 16);
+    }
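+
+    // For reference, the fixed-point evaluation the assembler above implements, written
+    // out in the same pseudocode notation used in its comments. This is a sketch only:
+    // it omits the extra rounding byte kept in a spare register, and assumes t is the
+    // 16-bit curve parameter prepared at the top of this routine.
+    //
+    //   uint16_t f = t;
+    //   umul16x16to16hi(f, f, t);                      // f = t^2
+    //   umul16x16to16hi(f, f, t);                      // f = t^3
+    //   uint24_t acc = bezier_F, v;
+    //   if (A_negative) {
+    //     umul16x24to24hi(v, f, bezier_C); acc -= v;   // - C * t^3
+    //     umul16x16to16hi(f, f, t);                    // f = t^4
+    //     umul16x24to24hi(v, f, bezier_B); acc += v;   // + B * t^4
+    //     umul16x16to16hi(f, f, t);                    // f = t^5
+    //     umul16x24to24hi(v, f, bezier_A); acc -= v;   // - A * t^5
+    //   }
+    //   else {
+    //     umul16x24to24hi(v, f, bezier_C); acc += v;   // + C * t^3
+    //     umul16x16to16hi(f, f, t);                    // f = t^4
+    //     umul16x24to24hi(v, f, bezier_B); acc -= v;   // - B * t^4
+    //     umul16x16to16hi(f, f, t);                    // f = t^5
+    //     umul16x24to24hi(v, f, bezier_A); acc += v;   // + A * t^5
+    //   }
+    //   return acc;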
+
+  #else
+
+    // For all other 32-bit CPUs
+    FORCE_INLINE void Stepper::_calc_bezier_curve_coeffs(const int32_t v0, const int32_t v1, const uint32_t av) {
+      // Calculate the Bézier coefficients
+      bezier_A =  768 * (v1 - v0);
+      bezier_B = 1920 * (v0 - v1);
+      bezier_C = 1280 * (v1 - v0);
+      bezier_F =  128 * v0;
+      bezier_AV = av;
+    }
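+
+    // Since 768 = 6 * 128, 1920 = 15 * 128 and 1280 = 10 * 128, these coefficients
+    // encode the quintic "smoothstep" speed profile (up to the fixed-point scaling
+    // that _eval_bezier_curve removes). A floating-point sketch of the same curve,
+    // for reference only - this helper and its name are illustrative, not part of
+    // the patch:
+    //
+    //   float bezier_speed_reference(const float v0, const float v1, const float t) {
+    //     // The 1st and 2nd derivatives are zero at t=0 and t=1, so acceleration
+    //     // starts and ends at zero - the jerk-controlled ramp.
+    //     const float k = ((6.0f * t - 15.0f) * t + 10.0f) * t * t * t;
+    //     return v0 + (v1 - v0) * k;   // e.g. at t=0.5, k=0.5, so speed = (v0 + v1) / 2
+    //   }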
+
+    FORCE_INLINE int32_t Stepper::_eval_bezier_curve(const uint32_t curr_step) {
+      #if defined(__ARM__) || defined(__thumb__)
+
+        // For ARM Cortex-M3/M4 CPUs, use the optimized assembler version, which takes 43 cycles to execute
+        register uint32_t flo = 0;
+        register uint32_t fhi = bezier_AV * curr_step;
+        register uint32_t t = fhi;
+        register int32_t alo = bezier_F;
+        register int32_t ahi = 0;
+        register int32_t A = bezier_A;
+        register int32_t B = bezier_B;
+        register int32_t C = bezier_C;
+
+         __asm__ __volatile__(
+          ".syntax unified"                   "\n\t"  // is to prevent CM0,CM1 non-unified syntax
+          " lsrs  %[ahi],%[alo],#1"           "\n\t"  // a  = F << 31      1 cycles
+          " lsls  %[alo],%[alo],#31"          "\n\t"  //                   1 cycles
+          " umull %[flo],%[fhi],%[fhi],%[t]"  "\n\t"  // f *= t            5 cycles [fhi:flo=64bits]
+          " umull %[flo],%[fhi],%[fhi],%[t]"  "\n\t"  // f>>=32; f*=t      5 cycles [fhi:flo=64bits]
+          " lsrs  %[flo],%[fhi],#1"           "\n\t"  //                   1 cycles [31bits]
+          " smlal %[alo],%[ahi],%[flo],%[C]"  "\n\t"  // a+=(f>>33)*C;     5 cycles
+          " umull %[flo],%[fhi],%[fhi],%[t]"  "\n\t"  // f>>=32; f*=t      5 cycles [fhi:flo=64bits]
+          " lsrs  %[flo],%[fhi],#1"           "\n\t"  //                   1 cycles [31bits]
+          " smlal %[alo],%[ahi],%[flo],%[B]"  "\n\t"  // a+=(f>>33)*B;     5 cycles
+          " umull %[flo],%[fhi],%[fhi],%[t]"  "\n\t"  // f>>=32; f*=t      5 cycles [fhi:flo=64bits]
+          " lsrs  %[flo],%[fhi],#1"           "\n\t"  // f>>=33;           1 cycles [31bits]
+          " smlal %[alo],%[ahi],%[flo],%[A]"  "\n\t"  // a+=(f>>33)*A;     5 cycles
+          " lsrs  %[alo],%[ahi],#6"           "\n\t"  // a>>=38            1 cycles
+          : [alo]"+r"( alo ) ,
+            [flo]"+r"( flo ) ,
+            [fhi]"+r"( fhi ) ,
+            [ahi]"+r"( ahi ) ,
+            [A]"+r"( A ) ,  // <== Note: Even if A, B, C, and t registers are INPUT ONLY
+            [B]"+r"( B ) ,  //  GCC does bad optimizations on the code if we list them as
+            [C]"+r"( C ) ,  //  such, breaking this function. So, to avoid that problem,
+            [t]"+r"( t )    //  we list all registers as input-outputs.
+          :
+          : "cc"
+        );
+        return alo;
+
+      #else
+
+        // For non-ARM targets, provide a portable fallback implementation. It is
+        // unlikely to be useful unless the processor is fast and 32-bit.
+
+        uint32_t t = bezier_AV * curr_step;               // t: Range 0 - 2^32-1 = 32 bits
+        uint64_t f = t;
+        f *= t;                                           // Range 32*2 = 64 bits (unsigned)
+        f >>= 32;                                         // Range 32 bits  (unsigned)
+        f *= t;                                           // Range 32*2 = 64 bits  (unsigned)
+        f >>= 32;                                         // Range 32 bits : f = t^3  (unsigned)
+        int64_t acc = (int64_t) bezier_F << 31;           // Range 63 bits (signed)
+        acc += ((uint32_t) f >> 1) * (int64_t) bezier_C;  // Range 29bits + 31 = 60bits (plus sign)
+        f *= t;                                           // Range 32*2 = 64 bits
+        f >>= 32;                                         // Range 32 bits : f = t^4  (unsigned)
+        acc += ((uint32_t) f >> 1) * (int64_t) bezier_B;  // Range 29bits + 31 = 60bits (plus sign)
+        f *= t;                                           // Range 32*2 = 64 bits
+        f >>= 32;                                         // Range 32 bits : f = t^5  (unsigned)
+        acc += ((uint32_t) f >> 1) * (int64_t) bezier_A;  // Range 28bits + 31 = 59bits (plus sign)
+        acc >>= (31 + 7);                                 // Range 24bits (plus sign)
+        return (int32_t) acc;
+
+      #endif
+    }
+  #endif
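+
+  // Intended usage (see the stepper ISR hunks below): the coefficients are set up once per
+  // trapezoid phase, using the precomputed inverse of the phase duration, and the curve is
+  // then evaluated once per step event. Roughly (the local names here are illustrative;
+  // the real calls live in Stepper::isr()):
+  //
+  //   _calc_bezier_curve_coeffs(current_block->initial_rate, current_block->cruise_rate,
+  //                             current_block->acceleration_time_inverse);
+  //   ...
+  //   const int32_t rate = _eval_bezier_curve(elapsed_phase_time);   // once per step event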
 #endif // BEZIER_JERK_CONTROL
 
 /**
@@ -660,7 +1268,7 @@ void Stepper::isr() {
 
       #if ENABLED(BEZIER_JERK_CONTROL)
         // Initialize the Bézier speed curve
-        _calc_bezier_curve_coeffs(current_block->initial_rate, current_block->cruise_rate, current_block->acceleration_time);
+        _calc_bezier_curve_coeffs(current_block->initial_rate, current_block->cruise_rate, current_block->acceleration_time_inverse);
 
         // We have not started the 2nd half of the trapezoid
         bezier_2nd_half = false;
@@ -953,7 +1561,7 @@ void Stepper::isr() {
       if (!bezier_2nd_half) {
 
         // Initialize the Bézier speed curve
-        _calc_bezier_curve_coeffs(current_block->cruise_rate, current_block->final_rate, current_block->deceleration_time);
+        _calc_bezier_curve_coeffs(current_block->cruise_rate, current_block->final_rate, current_block->deceleration_time_inverse);
         bezier_2nd_half = true;
       }
 
diff --git a/Marlin/src/module/stepper.h b/Marlin/src/module/stepper.h
index d7fca16f2b..88bbab8743 100644
--- a/Marlin/src/module/stepper.h
+++ b/Marlin/src/module/stepper.h
@@ -98,12 +98,15 @@ class Stepper {
     static volatile uint32_t step_events_completed; // The number of step events executed in the current block
 
     #if ENABLED(BEZIER_JERK_CONTROL)
-      static int32_t bezier_A,        // A coefficient in Bézier speed curve
-                     bezier_B,        // B coefficient in Bézier speed curve
-                     bezier_C,        // C coefficient in Bézier speed curve
-                     bezier_F;        // F coefficient in Bézier speed curve
-      static uint32_t bezier_AV;      // AV coefficient in Bézier speed curve
-      static bool bezier_2nd_half;    // If Bézier curve has been initialized or not
+      static int32_t bezier_A,     // A coefficient in Bézier speed curve
+                     bezier_B,     // B coefficient in Bézier speed curve
+                     bezier_C;     // C coefficient in Bézier speed curve
+      static uint32_t bezier_F;    // F coefficient in Bézier speed curve
+      static uint32_t bezier_AV;   // AV coefficient in Bézier speed curve
+      #ifdef __AVR__
+        static bool A_negative;    // If A coefficient was negative
+      #endif
+      static bool bezier_2nd_half; // =true once the 2nd half (deceleration) of the Bézier curve has been initialized
     #endif
 
     #if ENABLED(LIN_ADVANCE)
@@ -361,7 +364,7 @@ class Stepper {
     }
 
     #if ENABLED(BEZIER_JERK_CONTROL)
-      static void _calc_bezier_curve_coeffs(const int32_t v0, const int32_t v1, const uint32_t steps);
+      static void _calc_bezier_curve_coeffs(const int32_t v0, const int32_t v1, const uint32_t av);
       static int32_t _eval_bezier_curve(const uint32_t curr_step);
     #endif