2016-05-25 17:37:40 +02:00
|
|
|
// Stepper pulse schedule compression
|
|
|
|
//
|
2017-04-07 17:47:24 +02:00
|
|
|
// Copyright (C) 2016,2017 Kevin O'Connor <kevin@koconnor.net>
|
2016-05-25 17:37:40 +02:00
|
|
|
//
|
|
|
|
// This file may be distributed under the terms of the GNU GPLv3 license.
|
|
|
|
//
|
|
|
|
// The goal of this code is to take a series of scheduled stepper
|
|
|
|
// pulse times and compress them into a handful of commands that can
|
|
|
|
// be efficiently transmitted and executed on a microcontroller (mcu).
|
|
|
|
// The mcu accepts step pulse commands that take interval, count, and
|
|
|
|
// add parameters such that 'count' pulses occur, with each step event
|
|
|
|
// calculating the next step event time using:
|
|
|
|
// next_wake_time = last_wake_time + interval; interval += add
|
|
|
|
// This code is written in C (instead of python) for processing
|
|
|
|
// efficiency - the repetitive integer math is vastly faster in C.
|
|
|
|
|
|
|
|
#include <math.h> // sqrt
|
|
|
|
#include <stddef.h> // offsetof
|
|
|
|
#include <stdint.h> // uint32_t
|
2016-12-13 16:44:26 +01:00
|
|
|
#include <stdio.h> // fprintf
|
2016-05-25 17:37:40 +02:00
|
|
|
#include <stdlib.h> // malloc
|
|
|
|
#include <string.h> // memset
|
2016-11-30 18:04:28 +01:00
|
|
|
#include "pyhelper.h" // errorf
|
2016-05-25 17:37:40 +02:00
|
|
|
#include "serialqueue.h" // struct queue_message
|
|
|
|
|
|
|
|
#define CHECK_LINES 1
|
|
|
|
#define QUEUE_START_SIZE 1024
|
|
|
|
|
|
|
|
struct stepcompress {
|
|
|
|
// Buffer management
|
2017-08-30 18:42:53 +02:00
|
|
|
uint32_t *queue, *queue_end, *queue_pos, *queue_next;
|
2016-05-25 17:37:40 +02:00
|
|
|
// Internal tracking
|
2016-11-02 00:38:37 +01:00
|
|
|
uint32_t max_error;
|
2016-05-25 17:37:40 +02:00
|
|
|
// Message generation
|
2016-12-30 23:02:28 +01:00
|
|
|
uint64_t last_step_clock, homing_clock;
|
2016-05-25 17:37:40 +02:00
|
|
|
struct list_head msg_queue;
|
2016-11-10 18:44:04 +01:00
|
|
|
uint32_t queue_step_msgid, set_next_step_dir_msgid, oid;
|
|
|
|
int sdir, invert_sdir;
|
2016-05-25 17:37:40 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
/****************************************************************
|
|
|
|
* Step compression
|
|
|
|
****************************************************************/
|
|
|
|
|
|
|
|
#define DIV_UP(n,d) (((n) + (d) - 1) / (d))
|
|
|
|
|
|
|
|
static inline int32_t
|
|
|
|
idiv_up(int32_t n, int32_t d)
|
|
|
|
{
|
|
|
|
return (n>=0) ? DIV_UP(n,d) : (n/d);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int32_t
|
|
|
|
idiv_down(int32_t n, int32_t d)
|
|
|
|
{
|
|
|
|
return (n>=0) ? (n/d) : (n - d + 1) / d;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct points {
|
|
|
|
int32_t minp, maxp;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Given a requested step time, return the minimum and maximum
|
|
|
|
// acceptable times
|
2016-12-13 16:44:26 +01:00
|
|
|
static inline struct points
|
2017-08-30 18:42:53 +02:00
|
|
|
minmax_point(struct stepcompress *sc, uint32_t *pos)
|
2016-05-25 17:37:40 +02:00
|
|
|
{
|
2017-08-30 18:42:53 +02:00
|
|
|
uint32_t lsc = sc->last_step_clock;
|
|
|
|
uint32_t prevpoint = pos > sc->queue_pos ? *(pos-1) - lsc : 0;
|
|
|
|
uint32_t point = *pos - lsc;
|
2016-05-25 17:37:40 +02:00
|
|
|
uint32_t max_error = (point - prevpoint) / 2;
|
|
|
|
if (max_error > sc->max_error)
|
|
|
|
max_error = sc->max_error;
|
|
|
|
return (struct points){ point - max_error, point };
|
|
|
|
}
|
|
|
|
|
|
|
|
// The maximum add delta between two valid quadratic sequences of the
|
|
|
|
// form "add*count*(count-1)/2 + interval*count" is "(6 + 4*sqrt(2)) *
|
|
|
|
// maxerror / (count*count)". The "6 + 4*sqrt(2)" is 11.65685, but
|
2016-12-14 16:59:15 +01:00
|
|
|
// using 11 works well in practice.
|
2016-05-25 17:37:40 +02:00
|
|
|
#define QUADRATIC_DEV 11
|
|
|
|
|
|
|
|
struct step_move {
|
|
|
|
uint32_t interval;
|
|
|
|
uint16_t count;
|
|
|
|
int16_t add;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Find a 'step_move' that covers a series of step times
|
|
|
|
static struct step_move
|
|
|
|
compress_bisect_add(struct stepcompress *sc)
|
|
|
|
{
|
2017-08-30 18:42:53 +02:00
|
|
|
uint32_t *qlast = sc->queue_next;
|
|
|
|
if (qlast > sc->queue_pos + 65535)
|
|
|
|
qlast = sc->queue_pos + 65535;
|
2016-05-25 17:37:40 +02:00
|
|
|
struct points point = minmax_point(sc, sc->queue_pos);
|
2016-09-22 22:40:00 +02:00
|
|
|
int32_t outer_mininterval = point.minp, outer_maxinterval = point.maxp;
|
2016-12-14 16:59:15 +01:00
|
|
|
int32_t add = 0, minadd = -0x8000, maxadd = 0x7fff;
|
2016-12-07 01:01:00 +01:00
|
|
|
int32_t bestinterval = 0, bestcount = 1, bestadd = 1, bestreach = INT32_MIN;
|
2016-12-19 03:56:30 +01:00
|
|
|
int32_t zerointerval = 0, zerocount = 0;
|
2016-05-25 17:37:40 +02:00
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
// Find longest valid sequence with the given 'add'
|
2016-12-13 16:44:26 +01:00
|
|
|
struct points nextpoint;
|
|
|
|
int32_t nextmininterval = outer_mininterval;
|
|
|
|
int32_t nextmaxinterval = outer_maxinterval, interval = nextmaxinterval;
|
|
|
|
int32_t nextcount = 1;
|
2016-05-25 17:37:40 +02:00
|
|
|
for (;;) {
|
2016-12-13 16:44:26 +01:00
|
|
|
nextcount++;
|
2017-08-30 18:42:53 +02:00
|
|
|
if (&sc->queue_pos[nextcount-1] >= qlast) {
|
2016-12-13 16:44:26 +01:00
|
|
|
int32_t count = nextcount - 1;
|
|
|
|
return (struct step_move){ interval, count, add };
|
2016-11-02 04:08:16 +01:00
|
|
|
}
|
2016-12-13 16:44:26 +01:00
|
|
|
nextpoint = minmax_point(sc, sc->queue_pos + nextcount - 1);
|
2016-12-26 18:47:17 +01:00
|
|
|
int32_t nextaddfactor = nextcount*(nextcount-1)/2;
|
2016-12-13 16:44:26 +01:00
|
|
|
int32_t c = add*nextaddfactor;
|
|
|
|
if (nextmininterval*nextcount < nextpoint.minp - c)
|
|
|
|
nextmininterval = DIV_UP(nextpoint.minp - c, nextcount);
|
|
|
|
if (nextmaxinterval*nextcount > nextpoint.maxp - c)
|
|
|
|
nextmaxinterval = (nextpoint.maxp - c) / nextcount;
|
2016-05-25 17:37:40 +02:00
|
|
|
if (nextmininterval > nextmaxinterval)
|
|
|
|
break;
|
2016-12-13 16:44:26 +01:00
|
|
|
interval = nextmaxinterval;
|
2016-05-25 17:37:40 +02:00
|
|
|
}
|
2016-09-22 22:40:00 +02:00
|
|
|
|
|
|
|
// Check if this is the best sequence found so far
|
2016-12-26 18:47:17 +01:00
|
|
|
int32_t count = nextcount - 1, addfactor = count*(count-1)/2;
|
2016-12-13 16:44:26 +01:00
|
|
|
int32_t reach = add*addfactor + interval*count;
|
2016-12-19 04:11:20 +01:00
|
|
|
if (reach > bestreach
|
|
|
|
|| (reach == bestreach && interval > bestinterval)) {
|
2016-12-13 16:44:26 +01:00
|
|
|
bestinterval = interval;
|
2016-05-25 17:37:40 +02:00
|
|
|
bestcount = count;
|
|
|
|
bestadd = add;
|
2016-09-22 22:40:00 +02:00
|
|
|
bestreach = reach;
|
2016-12-19 03:56:30 +01:00
|
|
|
if (!add) {
|
|
|
|
zerointerval = interval;
|
|
|
|
zerocount = count;
|
|
|
|
}
|
2016-12-26 18:47:17 +01:00
|
|
|
if (count > 0x200)
|
|
|
|
// No 'add' will improve sequence; avoid integer overflow
|
|
|
|
break;
|
2016-05-25 17:37:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check if a greater or lesser add could extend the sequence
|
2016-12-26 18:47:17 +01:00
|
|
|
int32_t nextaddfactor = nextcount*(nextcount-1)/2;
|
2016-12-13 16:44:26 +01:00
|
|
|
int32_t nextreach = add*nextaddfactor + interval*nextcount;
|
|
|
|
if (nextreach < nextpoint.minp) {
|
2016-12-14 16:59:15 +01:00
|
|
|
minadd = add + 1;
|
2016-12-13 16:44:26 +01:00
|
|
|
outer_maxinterval = nextmaxinterval;
|
2016-07-06 19:19:21 +02:00
|
|
|
} else {
|
2016-12-14 16:59:15 +01:00
|
|
|
maxadd = add - 1;
|
2016-12-13 16:44:26 +01:00
|
|
|
outer_mininterval = nextmininterval;
|
2016-07-06 19:19:21 +02:00
|
|
|
}
|
2016-05-25 17:37:40 +02:00
|
|
|
|
|
|
|
// The maximum valid deviation between two quadratic sequences
|
|
|
|
// can be calculated and used to further limit the add range.
|
|
|
|
if (count > 1) {
|
2016-12-14 16:59:15 +01:00
|
|
|
int32_t errdelta = sc->max_error*QUADRATIC_DEV / (count*count);
|
2016-05-25 17:37:40 +02:00
|
|
|
if (minadd < add - errdelta)
|
|
|
|
minadd = add - errdelta;
|
|
|
|
if (maxadd > add + errdelta)
|
|
|
|
maxadd = add + errdelta;
|
|
|
|
}
|
|
|
|
|
2016-09-22 22:40:00 +02:00
|
|
|
// See if next point would further limit the add range
|
2016-12-13 16:44:26 +01:00
|
|
|
int32_t c = outer_maxinterval * nextcount;
|
2016-12-14 16:59:15 +01:00
|
|
|
if (minadd*nextaddfactor < nextpoint.minp - c)
|
|
|
|
minadd = idiv_up(nextpoint.minp - c, nextaddfactor);
|
2016-12-13 16:44:26 +01:00
|
|
|
c = outer_mininterval * nextcount;
|
2016-12-14 16:59:15 +01:00
|
|
|
if (maxadd*nextaddfactor > nextpoint.maxp - c)
|
|
|
|
maxadd = idiv_down(nextpoint.maxp - c, nextaddfactor);
|
2016-09-22 22:40:00 +02:00
|
|
|
|
2016-05-25 17:37:40 +02:00
|
|
|
// Bisect valid add range and try again with new 'add'
|
2016-12-14 16:59:15 +01:00
|
|
|
if (minadd > maxadd)
|
2016-05-25 17:37:40 +02:00
|
|
|
break;
|
2016-12-13 16:53:13 +01:00
|
|
|
add = maxadd - (maxadd - minadd) / 4;
|
2016-05-25 17:37:40 +02:00
|
|
|
}
|
2016-12-19 03:56:30 +01:00
|
|
|
if (zerocount + zerocount/16 >= bestcount)
|
|
|
|
// Prefer add=0 if it's similar to the best found sequence
|
|
|
|
return (struct step_move){ zerointerval, zerocount, 0 };
|
2016-05-25 17:37:40 +02:00
|
|
|
return (struct step_move){ bestinterval, bestcount, bestadd };
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/****************************************************************
|
|
|
|
* Step compress checking
|
|
|
|
****************************************************************/
|
|
|
|
|
2017-02-06 17:37:03 +01:00
|
|
|
#define ERROR_RET -989898989
|
|
|
|
|
2016-05-25 17:37:40 +02:00
|
|
|
// Verify that a given 'step_move' matches the actual step times
|
2017-02-06 17:37:03 +01:00
|
|
|
static int
|
2016-05-25 17:37:40 +02:00
|
|
|
check_line(struct stepcompress *sc, struct step_move move)
|
|
|
|
{
|
|
|
|
if (!CHECK_LINES)
|
2017-02-06 17:37:03 +01:00
|
|
|
return 0;
|
2017-08-30 18:42:53 +02:00
|
|
|
if (!move.count || (!move.interval && !move.add && move.count > 1)
|
2016-12-13 16:46:46 +01:00
|
|
|
|| move.interval >= 0x80000000) {
|
2017-03-12 17:29:20 +01:00
|
|
|
errorf("stepcompress o=%d i=%d c=%d a=%d: Invalid sequence"
|
|
|
|
, sc->oid, move.interval, move.count, move.add);
|
2017-02-06 17:37:03 +01:00
|
|
|
return ERROR_RET;
|
2016-05-25 17:37:40 +02:00
|
|
|
}
|
2016-11-02 04:08:16 +01:00
|
|
|
uint32_t interval = move.interval, p = 0;
|
2016-05-25 17:37:40 +02:00
|
|
|
uint16_t i;
|
|
|
|
for (i=0; i<move.count; i++) {
|
|
|
|
struct points point = minmax_point(sc, sc->queue_pos + i);
|
2016-11-02 04:08:16 +01:00
|
|
|
p += interval;
|
2016-05-25 17:37:40 +02:00
|
|
|
if (p < point.minp || p > point.maxp) {
|
2017-03-12 17:29:20 +01:00
|
|
|
errorf("stepcompress o=%d i=%d c=%d a=%d: Point %d: %d not in %d:%d"
|
|
|
|
, sc->oid, move.interval, move.count, move.add
|
|
|
|
, i+1, p, point.minp, point.maxp);
|
2017-02-06 17:37:03 +01:00
|
|
|
return ERROR_RET;
|
2016-05-25 17:37:40 +02:00
|
|
|
}
|
2016-11-02 04:08:16 +01:00
|
|
|
if (interval >= 0x80000000) {
|
2017-03-12 17:29:20 +01:00
|
|
|
errorf("stepcompress o=%d i=%d c=%d a=%d:"
|
|
|
|
" Point %d: interval overflow %d"
|
|
|
|
, sc->oid, move.interval, move.count, move.add
|
|
|
|
, i+1, interval);
|
2017-02-06 17:37:03 +01:00
|
|
|
return ERROR_RET;
|
2016-11-02 04:08:16 +01:00
|
|
|
}
|
2016-05-25 17:37:40 +02:00
|
|
|
interval += move.add;
|
|
|
|
}
|
2017-02-06 17:37:03 +01:00
|
|
|
return 0;
|
2016-05-25 17:37:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/****************************************************************
|
|
|
|
* Step compress interface
|
|
|
|
****************************************************************/
|
|
|
|
|
|
|
|
// Allocate a new 'stepcompress' object
|
|
|
|
struct stepcompress *
|
2016-11-10 18:44:04 +01:00
|
|
|
stepcompress_alloc(uint32_t max_error, uint32_t queue_step_msgid
|
|
|
|
, uint32_t set_next_step_dir_msgid, uint32_t invert_sdir
|
|
|
|
, uint32_t oid)
|
2016-05-25 17:37:40 +02:00
|
|
|
{
|
|
|
|
struct stepcompress *sc = malloc(sizeof(*sc));
|
|
|
|
memset(sc, 0, sizeof(*sc));
|
|
|
|
sc->max_error = max_error;
|
|
|
|
list_init(&sc->msg_queue);
|
|
|
|
sc->queue_step_msgid = queue_step_msgid;
|
2016-11-10 18:44:04 +01:00
|
|
|
sc->set_next_step_dir_msgid = set_next_step_dir_msgid;
|
2016-05-25 17:37:40 +02:00
|
|
|
sc->oid = oid;
|
2016-11-10 18:44:04 +01:00
|
|
|
sc->sdir = -1;
|
|
|
|
sc->invert_sdir = !!invert_sdir;
|
2016-05-25 17:37:40 +02:00
|
|
|
return sc;
|
|
|
|
}
|
|
|
|
|
2016-11-30 07:58:45 +01:00
|
|
|
// Free memory associated with a 'stepcompress' object
|
|
|
|
void
|
|
|
|
stepcompress_free(struct stepcompress *sc)
|
|
|
|
{
|
|
|
|
if (!sc)
|
|
|
|
return;
|
|
|
|
free(sc->queue);
|
|
|
|
message_queue_free(&sc->msg_queue);
|
|
|
|
free(sc);
|
|
|
|
}
|
|
|
|
|
2016-11-13 04:55:27 +01:00
|
|
|
// Convert previously scheduled steps into commands for the mcu
|
2017-02-06 17:37:03 +01:00
|
|
|
static int
|
2016-11-13 04:55:27 +01:00
|
|
|
stepcompress_flush(struct stepcompress *sc, uint64_t move_clock)
|
|
|
|
{
|
|
|
|
if (sc->queue_pos >= sc->queue_next)
|
2017-02-06 17:37:03 +01:00
|
|
|
return 0;
|
2017-08-30 18:42:53 +02:00
|
|
|
while (sc->last_step_clock < move_clock) {
|
2016-11-13 04:55:27 +01:00
|
|
|
struct step_move move = compress_bisect_add(sc);
|
2017-02-06 17:37:03 +01:00
|
|
|
int ret = check_line(sc, move);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2016-11-13 04:55:27 +01:00
|
|
|
|
|
|
|
uint32_t msg[5] = {
|
|
|
|
sc->queue_step_msgid, sc->oid, move.interval, move.count, move.add
|
|
|
|
};
|
|
|
|
struct queue_message *qm = message_alloc_and_encode(msg, 5);
|
|
|
|
qm->min_clock = qm->req_clock = sc->last_step_clock;
|
2017-08-30 18:42:53 +02:00
|
|
|
int32_t addfactor = move.count*(move.count-1)/2;
|
|
|
|
uint32_t ticks = move.add*addfactor + move.interval*move.count;
|
|
|
|
sc->last_step_clock += ticks;
|
2016-12-30 23:02:28 +01:00
|
|
|
if (sc->homing_clock)
|
|
|
|
// When homing, all steps should be sent prior to homing_clock
|
|
|
|
qm->min_clock = qm->req_clock = sc->homing_clock;
|
2016-11-13 04:55:27 +01:00
|
|
|
list_add_tail(&qm->node, &sc->msg_queue);
|
|
|
|
|
|
|
|
if (sc->queue_pos + move.count >= sc->queue_next) {
|
|
|
|
sc->queue_pos = sc->queue_next = sc->queue;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
sc->queue_pos += move.count;
|
|
|
|
}
|
2017-02-06 17:37:03 +01:00
|
|
|
return 0;
|
2016-11-13 04:55:27 +01:00
|
|
|
}
|
|
|
|
|
2017-08-30 18:42:53 +02:00
|
|
|
// Generate a queue_step for a step far in the future from the last step
|
|
|
|
static int
|
|
|
|
stepcompress_flush_far(struct stepcompress *sc, uint64_t abs_step_clock)
|
|
|
|
{
|
|
|
|
uint32_t msg[5] = {
|
|
|
|
sc->queue_step_msgid, sc->oid, abs_step_clock - sc->last_step_clock, 1, 0
|
|
|
|
};
|
|
|
|
struct queue_message *qm = message_alloc_and_encode(msg, 5);
|
|
|
|
qm->min_clock = sc->last_step_clock;
|
|
|
|
sc->last_step_clock = qm->req_clock = abs_step_clock;
|
|
|
|
if (sc->homing_clock)
|
|
|
|
// When homing, all steps should be sent prior to homing_clock
|
|
|
|
qm->min_clock = qm->req_clock = sc->homing_clock;
|
|
|
|
list_add_tail(&qm->node, &sc->msg_queue);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-11-10 18:44:04 +01:00
|
|
|
// Send the set_next_step_dir command
|
2017-02-06 17:37:03 +01:00
|
|
|
static int
|
2016-11-10 18:44:04 +01:00
|
|
|
set_next_step_dir(struct stepcompress *sc, int sdir)
|
|
|
|
{
|
2016-12-31 19:21:53 +01:00
|
|
|
if (sc->sdir == sdir)
|
2017-02-06 17:37:03 +01:00
|
|
|
return 0;
|
2016-11-10 18:44:04 +01:00
|
|
|
sc->sdir = sdir;
|
2017-02-06 17:37:03 +01:00
|
|
|
int ret = stepcompress_flush(sc, UINT64_MAX);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2016-11-10 18:44:04 +01:00
|
|
|
uint32_t msg[3] = {
|
|
|
|
sc->set_next_step_dir_msgid, sc->oid, sdir ^ sc->invert_sdir
|
|
|
|
};
|
|
|
|
struct queue_message *qm = message_alloc_and_encode(msg, 3);
|
2016-12-30 23:02:28 +01:00
|
|
|
qm->req_clock = sc->homing_clock ?: sc->last_step_clock;
|
2016-11-10 18:44:04 +01:00
|
|
|
list_add_tail(&qm->node, &sc->msg_queue);
|
2017-02-06 17:37:03 +01:00
|
|
|
return 0;
|
2016-11-10 18:44:04 +01:00
|
|
|
}
|
|
|
|
|
2017-04-05 01:37:54 +02:00
|
|
|
// Reset the internal state of the stepcompress object
|
|
|
|
int
|
|
|
|
stepcompress_reset(struct stepcompress *sc, uint64_t last_step_clock)
|
|
|
|
{
|
|
|
|
int ret = stepcompress_flush(sc, UINT64_MAX);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
sc->last_step_clock = last_step_clock;
|
|
|
|
sc->sdir = -1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Indicate the stepper is in homing mode (or done homing if zero)
|
|
|
|
int
|
|
|
|
stepcompress_set_homing(struct stepcompress *sc, uint64_t homing_clock)
|
|
|
|
{
|
|
|
|
int ret = stepcompress_flush(sc, UINT64_MAX);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
sc->homing_clock = homing_clock;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Queue an mcu command to go out in order with stepper commands
|
|
|
|
int
|
|
|
|
stepcompress_queue_msg(struct stepcompress *sc, uint32_t *data, int len)
|
|
|
|
{
|
|
|
|
int ret = stepcompress_flush(sc, UINT64_MAX);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
struct queue_message *qm = message_alloc_and_encode(data, len);
|
|
|
|
qm->req_clock = sc->homing_clock ?: sc->last_step_clock;
|
|
|
|
list_add_tail(&qm->node, &sc->msg_queue);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-08-30 16:43:57 +02:00
|
|
|
/****************************************************************
|
|
|
|
* Queue management
|
|
|
|
****************************************************************/
|
|
|
|
|
|
|
|
struct queue_append {
|
|
|
|
struct stepcompress *sc;
|
2017-08-30 18:42:53 +02:00
|
|
|
uint32_t *qnext, *qend, last_step_clock_32;
|
2017-08-30 16:43:57 +02:00
|
|
|
double clock_offset;
|
|
|
|
};
|
|
|
|
|
2017-08-30 18:42:53 +02:00
|
|
|
// Maximum clock delta between messages in the queue
|
|
|
|
#define CLOCK_DIFF_MAX (3<<28)
|
|
|
|
|
2017-08-30 16:43:57 +02:00
|
|
|
// Create a cursor for inserting clock times into the queue
|
|
|
|
static inline struct queue_append
|
|
|
|
queue_append_start(struct stepcompress *sc, double clock_offset, double adjust)
|
|
|
|
{
|
|
|
|
return (struct queue_append) {
|
|
|
|
.sc = sc, .qnext = sc->queue_next, .qend = sc->queue_end,
|
2017-08-30 18:42:53 +02:00
|
|
|
.last_step_clock_32 = sc->last_step_clock,
|
|
|
|
.clock_offset = (clock_offset - (double)sc->last_step_clock) + adjust };
|
2017-08-30 16:43:57 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Finalize a cursor created with queue_append_start()
|
|
|
|
static inline void
|
|
|
|
queue_append_finish(struct queue_append qa)
|
|
|
|
{
|
|
|
|
qa.sc->queue_next = qa.qnext;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Slow path for queue_append()
|
|
|
|
static int
|
2017-08-30 18:42:53 +02:00
|
|
|
queue_append_slow(struct stepcompress *sc, double rel_sc)
|
2017-08-30 16:43:57 +02:00
|
|
|
{
|
2017-08-30 18:42:53 +02:00
|
|
|
uint64_t abs_step_clock = (uint64_t)rel_sc + sc->last_step_clock;
|
|
|
|
if (abs_step_clock >= sc->last_step_clock + CLOCK_DIFF_MAX) {
|
|
|
|
// Avoid integer overflow on steps far in the future
|
|
|
|
int ret = stepcompress_flush(sc, abs_step_clock - CLOCK_DIFF_MAX + 1);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
if (abs_step_clock >= sc->last_step_clock + CLOCK_DIFF_MAX)
|
|
|
|
return stepcompress_flush_far(sc, abs_step_clock);
|
|
|
|
}
|
|
|
|
|
2017-08-30 16:43:57 +02:00
|
|
|
if (sc->queue_next - sc->queue_pos > 65535 + 2000) {
|
|
|
|
// No point in keeping more than 64K steps in memory
|
|
|
|
int ret = stepcompress_flush(sc, *(sc->queue_next - 65535));
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
int in_use = sc->queue_next - sc->queue_pos;
|
|
|
|
if (sc->queue_pos > sc->queue) {
|
|
|
|
// Shuffle the internal queue to avoid having to allocate more ram
|
|
|
|
memmove(sc->queue, sc->queue_pos, in_use * sizeof(*sc->queue));
|
|
|
|
} else {
|
|
|
|
// Expand the internal queue of step times
|
|
|
|
int alloc = sc->queue_end - sc->queue;
|
|
|
|
if (!alloc)
|
|
|
|
alloc = QUEUE_START_SIZE;
|
|
|
|
while (in_use >= alloc)
|
|
|
|
alloc *= 2;
|
|
|
|
sc->queue = realloc(sc->queue, alloc * sizeof(*sc->queue));
|
|
|
|
sc->queue_end = sc->queue + alloc;
|
|
|
|
}
|
|
|
|
sc->queue_pos = sc->queue;
|
|
|
|
sc->queue_next = sc->queue + in_use;
|
|
|
|
*sc->queue_next++ = abs_step_clock;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add a clock time to the queue (flushing the queue if needed)
|
|
|
|
static inline int
|
|
|
|
queue_append(struct queue_append *qa, double step_clock)
|
|
|
|
{
|
2017-08-30 18:42:53 +02:00
|
|
|
double rel_sc = step_clock + qa->clock_offset;
|
|
|
|
if (likely(qa->qnext < qa->qend && rel_sc < (double)CLOCK_DIFF_MAX)) {
|
|
|
|
*qa->qnext++ = qa->last_step_clock_32 + (uint32_t)rel_sc;
|
2017-08-30 16:43:57 +02:00
|
|
|
return 0;
|
|
|
|
}
|
2017-08-30 18:42:53 +02:00
|
|
|
// Call queue_append_slow() to handle queue expansion and integer overflow
|
2017-08-30 16:43:57 +02:00
|
|
|
struct stepcompress *sc = qa->sc;
|
2017-08-30 18:42:53 +02:00
|
|
|
uint64_t old_last_step_clock = sc->last_step_clock;
|
2017-08-30 16:43:57 +02:00
|
|
|
sc->queue_next = qa->qnext;
|
2017-08-30 18:42:53 +02:00
|
|
|
int ret = queue_append_slow(sc, rel_sc);
|
2017-08-30 16:43:57 +02:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
qa->qnext = sc->queue_next;
|
|
|
|
qa->qend = sc->queue_end;
|
2017-08-30 18:42:53 +02:00
|
|
|
qa->last_step_clock_32 = sc->last_step_clock;
|
|
|
|
qa->clock_offset -= sc->last_step_clock - old_last_step_clock;
|
2017-08-30 16:43:57 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-04-05 01:37:54 +02:00
|
|
|
/****************************************************************
|
|
|
|
* Motion to step conversions
|
|
|
|
****************************************************************/
|
|
|
|
|
2017-08-30 16:43:57 +02:00
|
|
|
// Common suffixes: _sd is step distance (a unit length the same
|
|
|
|
// distance the stepper moves on each step), _sv is step velocity (in
|
|
|
|
// units of step distance per clock tick), _sd2 is step distance
|
|
|
|
// squared, _r is ratio (scalar usually between 0.0 and 1.0). Times
|
|
|
|
// are represented as clock ticks (a unit of time determined by a
|
|
|
|
// micro-controller tick) and acceleration is in units of step
|
|
|
|
// distance per clock ticks squared.
|
|
|
|
|
2017-04-05 01:37:54 +02:00
|
|
|
// Wrapper around sqrt() to handle small negative numbers
|
|
|
|
static double
|
|
|
|
_safe_sqrt(double v)
|
|
|
|
{
|
|
|
|
// Due to floating point truncation, it's possible to get a small
|
|
|
|
// negative number - treat it as zero.
|
|
|
|
if (v < -0.001)
|
|
|
|
errorf("safe_sqrt of %.9f", v);
|
|
|
|
return 0.;
|
|
|
|
}
|
|
|
|
static inline double safe_sqrt(double v) {
|
|
|
|
return likely(v >= 0.) ? sqrt(v) : _safe_sqrt(v);
|
|
|
|
}
|
|
|
|
|
2016-05-25 17:37:40 +02:00
|
|
|
// Schedule a step event at the specified step_clock time
|
2017-02-06 17:37:03 +01:00
|
|
|
int
|
2016-11-10 18:44:04 +01:00
|
|
|
stepcompress_push(struct stepcompress *sc, double step_clock, int32_t sdir)
|
2016-05-25 17:37:40 +02:00
|
|
|
{
|
2017-02-06 17:37:03 +01:00
|
|
|
int ret = set_next_step_dir(sc, !!sdir);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-08-30 16:43:57 +02:00
|
|
|
struct queue_append qa = queue_append_start(sc, step_clock, 0.5);
|
|
|
|
ret = queue_append(&qa, 0.);
|
2017-02-06 17:37:03 +01:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-08-30 16:43:57 +02:00
|
|
|
queue_append_finish(qa);
|
2017-02-06 17:37:03 +01:00
|
|
|
return 0;
|
2016-05-25 17:37:40 +02:00
|
|
|
}
|
|
|
|
|
2017-04-07 18:51:52 +02:00
|
|
|
// Schedule 'steps' number of steps at constant acceleration. If
|
|
|
|
// acceleration is zero (ie, constant velocity) it uses the formula:
|
|
|
|
// step_clock = clock_offset + step_num/start_sv
|
|
|
|
// Otherwise it uses the formula:
|
|
|
|
// step_clock = (clock_offset + sqrt(2*step_num/accel + (start_sv/accel)**2)
|
|
|
|
// - start_sv/accel)
|
2016-11-10 18:44:04 +01:00
|
|
|
int32_t
|
2017-04-07 18:51:52 +02:00
|
|
|
stepcompress_push_const(
|
|
|
|
struct stepcompress *sc, double clock_offset
|
|
|
|
, double step_offset, double steps, double start_sv, double accel)
|
2016-05-25 17:37:40 +02:00
|
|
|
{
|
|
|
|
// Calculate number of steps to take
|
2016-11-10 18:44:04 +01:00
|
|
|
int sdir = 1;
|
|
|
|
if (steps < 0) {
|
|
|
|
sdir = 0;
|
|
|
|
steps = -steps;
|
|
|
|
step_offset = -step_offset;
|
2016-07-13 20:47:03 +02:00
|
|
|
}
|
2016-11-10 18:44:04 +01:00
|
|
|
int count = steps + .5 - step_offset;
|
2016-12-24 05:13:35 +01:00
|
|
|
if (count <= 0 || count > 10000000) {
|
2017-02-06 17:37:03 +01:00
|
|
|
if (count && steps) {
|
2017-04-07 18:51:52 +02:00
|
|
|
errorf("push_const invalid count %d %f %f %f %f %f"
|
|
|
|
, sc->oid, clock_offset, step_offset, steps
|
|
|
|
, start_sv, accel);
|
2017-02-06 17:37:03 +01:00
|
|
|
return ERROR_RET;
|
|
|
|
}
|
2016-11-10 18:44:04 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2017-02-06 17:37:03 +01:00
|
|
|
int ret = set_next_step_dir(sc, sdir);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2016-12-31 19:21:53 +01:00
|
|
|
int res = sdir ? count : -count;
|
2016-05-25 17:37:40 +02:00
|
|
|
|
|
|
|
// Calculate each step time
|
2017-04-07 18:51:52 +02:00
|
|
|
if (!accel) {
|
|
|
|
// Move at constant velocity (zero acceleration)
|
2017-08-30 16:43:57 +02:00
|
|
|
struct queue_append qa = queue_append_start(sc, clock_offset, .5);
|
2017-04-07 18:51:52 +02:00
|
|
|
double inv_cruise_sv = 1. / start_sv;
|
2017-08-31 08:15:32 +02:00
|
|
|
double pos = (step_offset + .5) * inv_cruise_sv;
|
2017-04-07 18:51:52 +02:00
|
|
|
while (count--) {
|
2017-08-31 08:15:32 +02:00
|
|
|
ret = queue_append(&qa, pos);
|
2017-04-07 18:51:52 +02:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-08-31 08:15:32 +02:00
|
|
|
pos += inv_cruise_sv;
|
2017-04-07 18:51:52 +02:00
|
|
|
}
|
2017-08-30 16:43:57 +02:00
|
|
|
queue_append_finish(qa);
|
2017-04-07 18:51:52 +02:00
|
|
|
} else {
|
|
|
|
// Move with constant acceleration
|
|
|
|
double inv_accel = 1. / accel;
|
2017-08-31 08:15:32 +02:00
|
|
|
double accel_time = start_sv * inv_accel;
|
2017-08-30 16:43:57 +02:00
|
|
|
struct queue_append qa = queue_append_start(
|
2017-08-31 08:15:32 +02:00
|
|
|
sc, clock_offset, 0.5 - accel_time);
|
2017-04-07 18:51:52 +02:00
|
|
|
double accel_multiplier = 2. * inv_accel;
|
2017-08-31 08:15:32 +02:00
|
|
|
double pos = (step_offset + .5)*accel_multiplier + accel_time*accel_time;
|
2017-04-07 18:51:52 +02:00
|
|
|
while (count--) {
|
2017-08-31 08:15:32 +02:00
|
|
|
double v = safe_sqrt(pos);
|
2017-08-30 16:43:57 +02:00
|
|
|
int ret = queue_append(&qa, accel_multiplier >= 0. ? v : -v);
|
2017-04-07 18:51:52 +02:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-08-31 08:15:32 +02:00
|
|
|
pos += accel_multiplier;
|
2017-02-06 17:37:03 +01:00
|
|
|
}
|
2017-08-30 16:43:57 +02:00
|
|
|
queue_append_finish(qa);
|
2016-11-14 21:36:11 +01:00
|
|
|
}
|
2016-12-31 19:21:53 +01:00
|
|
|
return res;
|
2016-05-25 17:37:40 +02:00
|
|
|
}
|
|
|
|
|
2017-04-07 16:49:14 +02:00
|
|
|
// Schedule steps using delta kinematics
|
2017-04-07 17:47:24 +02:00
|
|
|
static int32_t
|
|
|
|
_stepcompress_push_delta(
|
2017-04-07 17:55:26 +02:00
|
|
|
struct stepcompress *sc, int sdir
|
|
|
|
, double clock_offset, double move_sd, double start_sv, double accel
|
2017-04-06 17:09:08 +02:00
|
|
|
, double height, double startxy_sd, double arm_sd, double movez_r)
|
2016-09-15 18:20:49 +02:00
|
|
|
{
|
|
|
|
// Calculate number of steps to take
|
2016-12-05 01:30:35 +01:00
|
|
|
double movexy_r = movez_r ? sqrt(1. - movez_r*movez_r) : 1.;
|
2017-04-06 17:09:08 +02:00
|
|
|
double arm_sd2 = arm_sd * arm_sd;
|
|
|
|
double endxy_sd = startxy_sd - movexy_r*move_sd;
|
|
|
|
double end_height = safe_sqrt(arm_sd2 - endxy_sd*endxy_sd);
|
2017-04-07 17:55:26 +02:00
|
|
|
int count = (end_height + movez_r*move_sd - height) * (sdir ? 1. : -1.) + .5;
|
2016-12-24 05:13:35 +01:00
|
|
|
if (count <= 0 || count > 10000000) {
|
2017-02-06 17:37:03 +01:00
|
|
|
if (count) {
|
2017-04-06 17:09:08 +02:00
|
|
|
errorf("push_delta invalid count %d %d %f %f %f %f %f %f %f %f"
|
|
|
|
, sc->oid, count, clock_offset, move_sd, start_sv, accel
|
|
|
|
, height, startxy_sd, arm_sd, movez_r);
|
2017-02-06 17:37:03 +01:00
|
|
|
return ERROR_RET;
|
|
|
|
}
|
2016-09-15 18:20:49 +02:00
|
|
|
return 0;
|
|
|
|
}
|
2017-04-07 17:55:26 +02:00
|
|
|
int ret = set_next_step_dir(sc, sdir);
|
2017-02-06 17:37:03 +01:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-04-07 17:55:26 +02:00
|
|
|
int res = sdir ? count : -count;
|
2016-09-15 18:20:49 +02:00
|
|
|
|
|
|
|
// Calculate each step time
|
2017-04-07 17:55:26 +02:00
|
|
|
height += (sdir ? .5 : -.5);
|
2017-04-07 16:49:14 +02:00
|
|
|
if (!accel) {
|
|
|
|
// Move at constant velocity (zero acceleration)
|
2017-08-30 16:43:57 +02:00
|
|
|
struct queue_append qa = queue_append_start(sc, clock_offset, .5);
|
2017-04-07 16:49:14 +02:00
|
|
|
double inv_cruise_sv = 1. / start_sv;
|
|
|
|
if (!movez_r) {
|
2017-04-20 06:30:28 +02:00
|
|
|
// Optimized case for common XY only moves (no Z movement)
|
2017-04-07 16:49:14 +02:00
|
|
|
while (count--) {
|
2017-04-06 17:09:08 +02:00
|
|
|
double v = safe_sqrt(arm_sd2 - height*height);
|
2017-04-20 06:30:28 +02:00
|
|
|
double pos = startxy_sd + (sdir ? -v : v);
|
2017-08-30 16:43:57 +02:00
|
|
|
int ret = queue_append(&qa, pos * inv_cruise_sv);
|
2017-04-08 01:05:04 +02:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-04-07 17:55:26 +02:00
|
|
|
height += (sdir ? 1. : -1.);
|
2017-04-07 16:49:14 +02:00
|
|
|
}
|
|
|
|
} else if (!movexy_r) {
|
2017-04-20 06:30:28 +02:00
|
|
|
// Optimized case for Z only moves
|
2017-08-31 08:15:32 +02:00
|
|
|
double pos = ((sdir ? height-end_height : end_height-height)
|
|
|
|
* inv_cruise_sv);
|
2017-04-07 16:49:14 +02:00
|
|
|
while (count--) {
|
2017-08-31 08:15:32 +02:00
|
|
|
int ret = queue_append(&qa, pos);
|
2017-04-07 16:49:14 +02:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-08-31 08:15:32 +02:00
|
|
|
pos += inv_cruise_sv;
|
2017-04-07 16:49:14 +02:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// General case (handles XY+Z moves)
|
2017-04-20 06:30:28 +02:00
|
|
|
double start_pos = movexy_r*startxy_sd, zoffset = movez_r*startxy_sd;
|
2017-04-07 16:49:14 +02:00
|
|
|
while (count--) {
|
2017-04-20 06:30:28 +02:00
|
|
|
double relheight = movexy_r*height - zoffset;
|
2017-04-06 17:09:08 +02:00
|
|
|
double v = safe_sqrt(arm_sd2 - relheight*relheight);
|
2017-04-07 17:55:26 +02:00
|
|
|
double pos = start_pos + movez_r*height + (sdir ? -v : v);
|
2017-08-30 16:43:57 +02:00
|
|
|
int ret = queue_append(&qa, pos * inv_cruise_sv);
|
2017-04-08 01:05:04 +02:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-04-07 17:55:26 +02:00
|
|
|
height += (sdir ? 1. : -1.);
|
2017-04-07 16:49:14 +02:00
|
|
|
}
|
2016-12-05 19:45:39 +01:00
|
|
|
}
|
2017-08-30 16:43:57 +02:00
|
|
|
queue_append_finish(qa);
|
2016-12-05 19:45:39 +01:00
|
|
|
} else {
|
2017-04-07 16:49:14 +02:00
|
|
|
// Move with constant acceleration
|
2017-04-20 06:30:28 +02:00
|
|
|
double start_pos = movexy_r*startxy_sd, zoffset = movez_r*startxy_sd;
|
2017-04-07 16:49:14 +02:00
|
|
|
double inv_accel = 1. / accel;
|
|
|
|
start_pos += 0.5 * start_sv*start_sv * inv_accel;
|
2017-08-30 16:43:57 +02:00
|
|
|
struct queue_append qa = queue_append_start(
|
|
|
|
sc, clock_offset, 0.5 - start_sv * inv_accel);
|
2017-04-07 16:49:14 +02:00
|
|
|
double accel_multiplier = 2. * inv_accel;
|
2016-12-31 19:21:53 +01:00
|
|
|
while (count--) {
|
2017-04-20 06:30:28 +02:00
|
|
|
double relheight = movexy_r*height - zoffset;
|
2017-04-06 17:09:08 +02:00
|
|
|
double v = safe_sqrt(arm_sd2 - relheight*relheight);
|
2017-04-07 17:55:26 +02:00
|
|
|
double pos = start_pos + movez_r*height + (sdir ? -v : v);
|
2017-04-07 16:49:14 +02:00
|
|
|
v = safe_sqrt(pos * accel_multiplier);
|
2017-08-30 16:43:57 +02:00
|
|
|
int ret = queue_append(&qa, accel_multiplier >= 0. ? v : -v);
|
2017-04-08 01:05:04 +02:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-04-07 17:55:26 +02:00
|
|
|
height += (sdir ? 1. : -1.);
|
2016-12-05 19:45:39 +01:00
|
|
|
}
|
2017-08-30 16:43:57 +02:00
|
|
|
queue_append_finish(qa);
|
2016-09-15 18:20:49 +02:00
|
|
|
}
|
2016-12-31 19:21:53 +01:00
|
|
|
return res;
|
2016-09-15 18:20:49 +02:00
|
|
|
}
|
|
|
|
|
2017-04-07 17:47:24 +02:00
|
|
|
int32_t
|
|
|
|
stepcompress_push_delta(
|
|
|
|
struct stepcompress *sc, double clock_offset, double move_sd
|
|
|
|
, double start_sv, double accel
|
|
|
|
, double height, double startxy_sd, double arm_sd, double movez_r)
|
|
|
|
{
|
|
|
|
double reversexy_sd = startxy_sd + arm_sd*movez_r;
|
|
|
|
if (reversexy_sd <= 0.)
|
|
|
|
// All steps are in down direction
|
|
|
|
return _stepcompress_push_delta(
|
2017-04-07 17:55:26 +02:00
|
|
|
sc, 0, clock_offset, move_sd, start_sv, accel
|
2017-04-07 17:47:24 +02:00
|
|
|
, height, startxy_sd, arm_sd, movez_r);
|
|
|
|
double movexy_r = movez_r ? sqrt(1. - movez_r*movez_r) : 1.;
|
|
|
|
if (reversexy_sd >= move_sd * movexy_r)
|
|
|
|
// All steps are in up direction
|
|
|
|
return _stepcompress_push_delta(
|
2017-04-07 17:55:26 +02:00
|
|
|
sc, 1, clock_offset, move_sd, start_sv, accel
|
2017-04-07 17:47:24 +02:00
|
|
|
, height, startxy_sd, arm_sd, movez_r);
|
|
|
|
// Steps in both up and down direction
|
|
|
|
int res1 = _stepcompress_push_delta(
|
2017-04-07 17:55:26 +02:00
|
|
|
sc, 1, clock_offset, reversexy_sd / movexy_r, start_sv, accel
|
2017-04-07 17:47:24 +02:00
|
|
|
, height, startxy_sd, arm_sd, movez_r);
|
|
|
|
if (res1 == ERROR_RET)
|
|
|
|
return res1;
|
|
|
|
int res2 = _stepcompress_push_delta(
|
2017-04-07 17:55:26 +02:00
|
|
|
sc, 0, clock_offset, move_sd, start_sv, accel
|
2017-04-07 17:47:24 +02:00
|
|
|
, height + res1, startxy_sd, arm_sd, movez_r);
|
|
|
|
if (res2 == ERROR_RET)
|
|
|
|
return res2;
|
|
|
|
return res1 + res2;
|
|
|
|
}
|
|
|
|
|
2016-05-25 17:37:40 +02:00
|
|
|
|
|
|
|
/****************************************************************
|
|
|
|
* Step compress synchronization
|
|
|
|
****************************************************************/
|
|
|
|
|
|
|
|
// The steppersync object is used to synchronize the output of mcu
|
|
|
|
// step commands. The mcu can only queue a limited number of step
|
|
|
|
// commands - this code tracks when items on the mcu step queue become
|
|
|
|
// free so that new commands can be transmitted. It also ensures the
|
|
|
|
// mcu step queue is ordered between steppers so that no stepper
|
|
|
|
// starves the other steppers of space in the mcu step queue.
|
|
|
|
|
|
|
|
// Synchronizes mcu step command output across a group of stepcompress
// objects (see the block comment above for the full rationale).
struct steppersync {
    // Serial port
    struct serialqueue *sq;        // transmit queue for the mcu connection
    struct command_queue *cq;      // command queue used by this object's sends
    // Storage for associated stepcompress objects
    struct stepcompress **sc_list; // owned copy of the stepcompress pointers
    int sc_num;                    // number of entries in sc_list
    // Storage for list of pending move clocks
    uint64_t *move_clocks;         // min-heap of clocks when mcu move slots free
    int num_move_clocks;           // heap capacity (mcu move queue size)
};
|
|
|
|
|
2016-07-13 20:47:03 +02:00
|
|
|
// Allocate a new 'steppersync' object
|
2016-05-25 17:37:40 +02:00
|
|
|
struct steppersync *
|
|
|
|
steppersync_alloc(struct serialqueue *sq, struct stepcompress **sc_list
|
|
|
|
, int sc_num, int move_num)
|
|
|
|
{
|
|
|
|
struct steppersync *ss = malloc(sizeof(*ss));
|
|
|
|
memset(ss, 0, sizeof(*ss));
|
|
|
|
ss->sq = sq;
|
|
|
|
ss->cq = serialqueue_alloc_commandqueue();
|
|
|
|
|
|
|
|
ss->sc_list = malloc(sizeof(*sc_list)*sc_num);
|
|
|
|
memcpy(ss->sc_list, sc_list, sizeof(*sc_list)*sc_num);
|
|
|
|
ss->sc_num = sc_num;
|
|
|
|
|
|
|
|
ss->move_clocks = malloc(sizeof(*ss->move_clocks)*move_num);
|
|
|
|
memset(ss->move_clocks, 0, sizeof(*ss->move_clocks)*move_num);
|
|
|
|
ss->num_move_clocks = move_num;
|
|
|
|
|
|
|
|
return ss;
|
|
|
|
}
|
|
|
|
|
2016-11-30 07:58:45 +01:00
|
|
|
// Free memory associated with a 'steppersync' object
|
|
|
|
void
|
|
|
|
steppersync_free(struct steppersync *ss)
|
|
|
|
{
|
|
|
|
if (!ss)
|
|
|
|
return;
|
|
|
|
free(ss->sc_list);
|
|
|
|
free(ss->move_clocks);
|
|
|
|
serialqueue_free_commandqueue(ss->cq);
|
|
|
|
free(ss);
|
|
|
|
}
|
|
|
|
|
2016-05-25 17:37:40 +02:00
|
|
|
// Implement a binary heap algorithm to track when the next available
|
|
|
|
// 'struct move' in the mcu will be available
|
|
|
|
static void
|
|
|
|
heap_replace(struct steppersync *ss, uint64_t req_clock)
|
|
|
|
{
|
|
|
|
uint64_t *mc = ss->move_clocks;
|
|
|
|
int nmc = ss->num_move_clocks, pos = 0;
|
|
|
|
for (;;) {
|
|
|
|
int child1_pos = 2*pos+1, child2_pos = 2*pos+2;
|
|
|
|
uint64_t child2_clock = child2_pos < nmc ? mc[child2_pos] : UINT64_MAX;
|
|
|
|
uint64_t child1_clock = child1_pos < nmc ? mc[child1_pos] : UINT64_MAX;
|
|
|
|
if (req_clock <= child1_clock && req_clock <= child2_clock) {
|
|
|
|
mc[pos] = req_clock;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (child1_clock < child2_clock) {
|
|
|
|
mc[pos] = child1_clock;
|
|
|
|
pos = child1_pos;
|
|
|
|
} else {
|
|
|
|
mc[pos] = child2_clock;
|
|
|
|
pos = child2_pos;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Find and transmit any scheduled steps prior to the given 'move_clock'.
// Returns zero on success, or a non-zero error code propagated from
// stepcompress_flush().
int
steppersync_flush(struct steppersync *ss, uint64_t move_clock)
{
    // Flush each stepcompress to the specified move_clock
    int i;
    for (i=0; i<ss->sc_num; i++) {
        int ret = stepcompress_flush(ss->sc_list[i], move_clock);
        if (ret)
            return ret;
    }

    // Order commands by the reqclock of each pending command
    struct list_head msgs;
    list_init(&msgs);
    for (;;) {
        // Find message with lowest reqclock across all stepper queues
        uint64_t req_clock = MAX_CLOCK;
        struct queue_message *qm = NULL;
        for (i=0; i<ss->sc_num; i++) {
            struct stepcompress *sc = ss->sc_list[i];
            if (!list_empty(&sc->msg_queue)) {
                struct queue_message *m = list_first_entry(
                    &sc->msg_queue, struct queue_message, node);
                if (m->req_clock < req_clock) {
                    qm = m;
                    req_clock = m->req_clock;
                }
            }
        }
        // Stop when no messages remain, or the next command uses the
        // move queue and is not yet required by this move_clock
        if (!qm || (qm->min_clock && req_clock > move_clock))
            break;

        // Heap root is the earliest time a move queue slot frees up
        uint64_t next_avail = ss->move_clocks[0];
        if (qm->min_clock)
            // The qm->min_clock field is overloaded to indicate that
            // the command uses the 'move queue' and to store the time
            // that move queue item becomes available.
            heap_replace(ss, qm->min_clock);
        // Reset the min_clock to its normal meaning (minimum transmit time)
        qm->min_clock = next_avail;

        // Batch this command
        list_del(&qm->node);
        list_add_tail(&qm->node, &msgs);
    }

    // Transmit commands
    if (!list_empty(&msgs))
        serialqueue_send_batch(ss->sq, ss->cq, &msgs);
    return 0;
}
|