@@ -56,16 +56,15 @@
   ring_buffer_r rx_buffer = { { 0 }, 0, 0 };
   #if TX_BUFFER_SIZE > 0
     ring_buffer_t tx_buffer = { { 0 }, 0, 0 };
-    static bool _written;
   #endif
+  static bool _written;
 #endif

 #if ENABLED(SERIAL_XON_XOFF)
-  constexpr uint8_t XON_XOFF_CHAR_SENT = 0x80; // XON / XOFF Character was sent
-  constexpr uint8_t XON_XOFF_CHAR_MASK = 0x1F; // XON / XOFF character to send
+  constexpr uint8_t XON_XOFF_CHAR_SENT = 0x80, // XON / XOFF Character was sent
+                    XON_XOFF_CHAR_MASK = 0x1F; // XON / XOFF character to send
   // XON / XOFF character definitions
-  constexpr uint8_t XON_CHAR = 17;
-  constexpr uint8_t XOFF_CHAR = 19;
+  constexpr uint8_t XON_CHAR = 17, XOFF_CHAR = 19;
   uint8_t xon_xoff_state = XON_XOFF_CHAR_SENT | XON_CHAR;
 #endif

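Side note on the folded constants: xon_xoff_state packs the pending flow-control character (XON = 17 and XOFF = 19 both fit in the five bits covered by XON_XOFF_CHAR_MASK) together with an "already sent" flag in bit 7 (XON_XOFF_CHAR_SENT). A minimal standalone sketch of that encoding, reusing only the constants from this hunk (everything else here is illustrative, not driver code):

```cpp
#include <cassert>
#include <cstdint>

constexpr uint8_t XON_XOFF_CHAR_SENT = 0x80; // bit 7: char was already sent
constexpr uint8_t XON_XOFF_CHAR_MASK = 0x1F; // bits 0-4: the char itself
constexpr uint8_t XON_CHAR = 17, XOFF_CHAR = 19;

int main() {
  // Request an XOFF: store the char with the "sent" bit still clear.
  uint8_t state = XOFF_CHAR;
  assert((state & XON_XOFF_CHAR_MASK) == XOFF_CHAR); // which char is pending
  assert(!(state & XON_XOFF_CHAR_SENT));             // not yet transmitted

  // After transmitting it, set the flag without losing the char.
  state |= XON_XOFF_CHAR_SENT;
  assert((state & XON_XOFF_CHAR_MASK) == XOFF_CHAR);
  assert(state & XON_XOFF_CHAR_SENT);
  return 0;
}
```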
@@ -73,6 +72,14 @@
   uint8_t rx_dropped_bytes = 0;
 #endif

+#if ENABLED(SERIAL_STATS_RX_BUFFER_OVERRUNS)
+  uint8_t rx_buffer_overruns = 0;
+#endif
+
+#if ENABLED(SERIAL_STATS_RX_FRAMING_ERRORS)
+  uint8_t rx_framing_errors = 0;
+#endif
+
 #if ENABLED(SERIAL_STATS_MAX_RX_QUEUED)
   ring_buffer_pos_t rx_max_enqueued = 0;
 #endif
@@ -91,125 +98,209 @@
       static EmergencyParser::State emergency_state; // = EP_RESET
     #endif

-    const ring_buffer_pos_t h = rx_buffer.head,
-                            i = (ring_buffer_pos_t)(h + 1) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+    // Get the tail - Nothing can alter its value while we are at this ISR
+    const ring_buffer_pos_t t = rx_buffer.tail;
+
+    // Get the head pointer
+    ring_buffer_pos_t h = rx_buffer.head;
+
+    // Get the next element
+    ring_buffer_pos_t i = (ring_buffer_pos_t)(h + 1) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);

-    // Read the character
-    const uint8_t c = M_UDRx;
+    // This must read the M_UCSRxA register before reading the received byte to detect error causes
+    #if ENABLED(SERIAL_STATS_DROPPED_RX)
+      if (TEST(M_UCSRxA, M_DORx) && !++rx_dropped_bytes) --rx_dropped_bytes;
+    #endif
+
+    #if ENABLED(SERIAL_STATS_RX_BUFFER_OVERRUNS)
+      if (TEST(M_UCSRxA, M_DORx) && !++rx_buffer_overruns) --rx_buffer_overruns;
+    #endif
+
+    #if ENABLED(SERIAL_STATS_RX_FRAMING_ERRORS)
+      if (TEST(M_UCSRxA, M_FEx) && !++rx_framing_errors) --rx_framing_errors;
+    #endif
+
+    // Read the character from the USART
+    uint8_t c = M_UDRx;
+
+    #if ENABLED(EMERGENCY_PARSER)
+      emergency_parser.update(emergency_state, c);
+    #endif

     // If the character is to be stored at the index just before the tail
-    // (such that the head would advance to the current tail), the buffer is
-    // critical, so don't write the character or advance the head.
-    if (i != rx_buffer.tail) {
+    // (such that the head would advance to the current tail), the RX FIFO is
+    // full, so don't write the character or advance the head.
+    if (i != t) {
       rx_buffer.buffer[h] = c;
-      rx_buffer.head = i;
-    }
-    else {
-      #if ENABLED(SERIAL_STATS_DROPPED_RX)
-        if (!++rx_dropped_bytes) ++rx_dropped_bytes;
-      #endif
+      h = i;
     }
+    #if ENABLED(SERIAL_STATS_DROPPED_RX)
+      else if (!++rx_dropped_bytes) --rx_dropped_bytes;
+    #endif

     #if ENABLED(SERIAL_STATS_MAX_RX_QUEUED)
-      // calculate count of bytes stored into the RX buffer
-      ring_buffer_pos_t rx_count = (ring_buffer_pos_t)(rx_buffer.head - rx_buffer.tail) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+      // Calculate count of bytes stored into the RX buffer
+      const ring_buffer_pos_t rx_count = (ring_buffer_pos_t)(h - t) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+
       // Keep track of the maximum count of enqueued bytes
       NOLESS(rx_max_enqueued, rx_count);
     #endif

     #if ENABLED(SERIAL_XON_XOFF)
-
-      // for high speed transfers, we can use XON/XOFF protocol to do
-      // software handshake and avoid overruns.
+      // If the last char that was sent was an XON
       if ((xon_xoff_state & XON_XOFF_CHAR_MASK) == XON_CHAR) {

-        // calculate count of bytes stored into the RX buffer
-        ring_buffer_pos_t rx_count = (ring_buffer_pos_t)(rx_buffer.head - rx_buffer.tail) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+        // Bytes stored into the RX buffer
+        const ring_buffer_pos_t rx_count = (ring_buffer_pos_t)(h - t) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);

-        // if we are above 12.5% of RX buffer capacity, send XOFF before
-        // we run out of RX buffer space .. We need 325 bytes @ 250kbits/s to
-        // let the host react and stop sending bytes. This translates to 13mS
-        // propagation time.
+        // If over 12.5% of RX buffer capacity, send XOFF before running out of
+        // RX buffer space .. 325 bytes @ 250kbits/s needed to let the host react
+        // and stop sending bytes. This translates to 13mS propagation time.
         if (rx_count >= (RX_BUFFER_SIZE) / 8) {

-          // If TX interrupts are disabled and data register is empty,
-          // just write the byte to the data register and be done. This
-          // shortcut helps significantly improve the effective datarate
-          // at high (>500kbit/s) bitrates, where interrupt overhead
-          // becomes a slowdown.
-          if (!TEST(M_UCSRxB, M_UDRIEx) && TEST(M_UCSRxA, M_UDREx)) {
-
-            // Send an XOFF character
-            M_UDRx = XOFF_CHAR;
-
-            // clear the TXC bit -- "can be cleared by writing a one to its bit
-            // location". This makes sure flush() won't return until the bytes
-            // actually got written
-            SBI(M_UCSRxA, M_TXCx);
-
-            // And remember it was sent
-            xon_xoff_state = XOFF_CHAR | XON_XOFF_CHAR_SENT;
+          // At this point, definitely no TX interrupt was executing, since the TX isr can't be preempted.
+          // Don't enable the TX interrupt here as a means to trigger the XOFF char, because if it happens
+          // to be in the middle of trying to disable the RX interrupt in the main program, eventually the
+          // enabling of the TX interrupt could be undone. The ONLY reliable thing this can do to ensure
+          // the sending of the XOFF char is to send it HERE AND NOW.
+
+          // About to send the XOFF char
+          xon_xoff_state = XOFF_CHAR | XON_XOFF_CHAR_SENT;
+
+          // Wait until the TX register becomes empty and send it - Here there could be a problem
+          // - While waiting for the TX register to empty, the RX register could receive a new
+          // character. This must also handle that situation!
+          while (!TEST(M_UCSRxA, M_UDREx)) {
+
+            if (TEST(M_UCSRxA, M_RXCx)) {
+              // A char arrived while waiting for the TX buffer to be empty - Receive and process it!
+
+              i = (ring_buffer_pos_t)(h + 1) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+
+              // Read the character from the USART
+              c = M_UDRx;
+
+              #if ENABLED(EMERGENCY_PARSER)
+                emergency_parser.update(emergency_state, c);
+              #endif
+
+              // If the character is to be stored at the index just before the tail
+              // (such that the head would advance to the current tail), the FIFO is
+              // full, so don't write the character or advance the head.
+              if (i != t) {
+                rx_buffer.buffer[h] = c;
+                h = i;
+              }
+              #if ENABLED(SERIAL_STATS_DROPPED_RX)
+                else if (!++rx_dropped_bytes) --rx_dropped_bytes;
+              #endif
+            }
+            sw_barrier();
           }
-          else {
-            // TX interrupts disabled, but buffer still not empty ... or
-            // TX interrupts enabled. Reenable TX ints and schedule XOFF
-            // character to be sent
-            #if TX_BUFFER_SIZE > 0
-              SBI(M_UCSRxB, M_UDRIEx);
-              xon_xoff_state = XOFF_CHAR;
-            #else
-              // We are not using TX interrupts, we will have to send this manually
-              while (!TEST(M_UCSRxA, M_UDREx)) sw_barrier();
-              M_UDRx = XOFF_CHAR;
-
-              // clear the TXC bit -- "can be cleared by writing a one to its bit
-              // location". This makes sure flush() won't return until the bytes
-              // actually got written
-              SBI(M_UCSRxA, M_TXCx);
-
-              // And remember we already sent it
-              xon_xoff_state = XOFF_CHAR | XON_XOFF_CHAR_SENT;
-            #endif
+
+          M_UDRx = XOFF_CHAR;
+
+          // Clear the TXC bit -- "can be cleared by writing a one to its bit
+          // location". This makes sure flush() won't return until the bytes
+          // actually got written
+          SBI(M_UCSRxA, M_TXCx);
+
+          // At this point there could be a race condition between the write() function
+          // and this sending of the XOFF char. This interrupt could happen between the
+          // wait to be empty TX buffer loop and the actual write of the character. Since
+          // the TX buffer is full because it's sending the XOFF char, the only way to be
+          // sure the write() function will succeed is to wait for the XOFF char to be
+          // completely sent. Since an extra character could be received during the wait
+          // it must also be handled!
+          while (!TEST(M_UCSRxA, M_UDREx)) {
+
+            if (TEST(M_UCSRxA, M_RXCx)) {
+              // A char arrived while waiting for the TX buffer to be empty - Receive and process it!
+
+              i = (ring_buffer_pos_t)(h + 1) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+
+              // Read the character from the USART
+              c = M_UDRx;
+
+              #if ENABLED(EMERGENCY_PARSER)
+                emergency_parser.update(emergency_state, c);
+              #endif
+
+              // If the character is to be stored at the index just before the tail
+              // (such that the head would advance to the current tail), the FIFO is
+              // full, so don't write the character or advance the head.
+              if (i != t) {
+                rx_buffer.buffer[h] = c;
+                h = i;
+              }
+              #if ENABLED(SERIAL_STATS_DROPPED_RX)
+                else if (!++rx_dropped_bytes) --rx_dropped_bytes;
+              #endif
+            }
+            sw_barrier();
           }
+
+          // At this point everything is ready. The write() function won't
+          // have any issues writing to the UART TX register if it needs to!
         }
       }
     #endif // SERIAL_XON_XOFF

-    #if ENABLED(EMERGENCY_PARSER)
-      emergency_parser.update(emergency_state, c);
-    #endif
+    // Store the new head value
+    rx_buffer.head = h;
   }

 #if TX_BUFFER_SIZE > 0

   // (called with TX irqs disabled)
   FORCE_INLINE void _tx_udr_empty_irq(void) {
-    // If interrupts are enabled, there must be more data in the output
-    // buffer.
+
+    // Read positions
+    uint8_t t = tx_buffer.tail;
+    const uint8_t h = tx_buffer.head;

     #if ENABLED(SERIAL_XON_XOFF)
-      // Do a priority insertion of an XON/XOFF char, if needed.
-      const uint8_t state = xon_xoff_state;
-      if (!(state & XON_XOFF_CHAR_SENT)) {
-        M_UDRx = state & XON_XOFF_CHAR_MASK;
-        xon_xoff_state = state | XON_XOFF_CHAR_SENT;
+      // If an XON char is pending to be sent, do it now
+      if (xon_xoff_state == XON_CHAR) {
+
+        // Send the character
+        M_UDRx = XON_CHAR;
+
+        // clear the TXC bit -- "can be cleared by writing a one to its bit
+        // location". This makes sure flush() won't return until the bytes
+        // actually got written
+        SBI(M_UCSRxA, M_TXCx);
+
+        // Remember we sent it.
+        xon_xoff_state = XON_CHAR | XON_XOFF_CHAR_SENT;
+
+        // If nothing else to transmit, just disable TX interrupts.
+        if (h == t) CBI(M_UCSRxB, M_UDRIEx); // (Non-atomic, could be reenabled by the main program, but eventually this will succeed)
+
+        return;
       }
-      else
     #endif
-    { // Send the next byte
-      const uint8_t t = tx_buffer.tail, c = tx_buffer.buffer[t];
-      tx_buffer.tail = (t + 1) & (TX_BUFFER_SIZE - 1);
-      M_UDRx = c;
+
+    // If nothing to transmit, just disable TX interrupts. This could
+    // happen as the result of the non atomicity of the disabling of RX
+    // interrupts that could end reenabling TX interrupts as a side effect.
+    if (h == t) {
+      CBI(M_UCSRxB, M_UDRIEx); // (Non-atomic, could be reenabled by the main program, but eventually this will succeed)
+      return;
     }

-    // clear the TXC bit -- "can be cleared by writing a one to its bit
-    // location". This makes sure flush() won't return until the bytes
-    // actually got written
+    // There is something to TX, send the next byte
+    const uint8_t c = tx_buffer.buffer[t];
+    t = (t + 1) & (TX_BUFFER_SIZE - 1);
+    M_UDRx = c;
+    tx_buffer.tail = t;
+
+    // Clear the TXC bit (by writing a one to its bit location).
+    // Ensures flush() won't return until the bytes are actually written.
     SBI(M_UCSRxA, M_TXCx);

-    // Disable interrupts if the buffer is empty
-    if (tx_buffer.head == tx_buffer.tail)
-      CBI(M_UCSRxB, M_UDRIEx);
+    // Disable interrupts if there is nothing to transmit following this byte
+    if (h == t) CBI(M_UCSRxB, M_UDRIEx); // (Non-atomic, could be reenabled by the main program, but eventually this will succeed)
   }

 #ifdef M_USARTx_UDRE_vect
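Two idioms recur throughout the hunk above. The error counters use `if (!++counter) --counter;` so an 8-bit counter saturates at 255 instead of wrapping back to zero, and the ring-buffer indices advance with a bitwise AND, which only wraps correctly because RX_BUFFER_SIZE is a power of two. A small self-contained sketch of both (hypothetical sizes and values, not driver code):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // 1. Saturating 8-bit counter: increment, and if the increment wrapped
  //    to 0, decrement back, pinning the value at 255.
  uint8_t errors = 254;
  for (int n = 0; n < 5; ++n)
    if (!++errors) --errors;
  printf("errors = %u\n", errors); // prints 255, not 3

  // 2. Power-of-two ring buffer: masking with (SIZE - 1) wraps the index
  //    without a division, which is why RX_BUFFER_SIZE must be 2^n.
  constexpr uint16_t SIZE = 128;   // assumed buffer size
  uint16_t head = 127;
  head = (head + 1) & (SIZE - 1);
  printf("head = %u\n", head);     // prints 0
  return 0;
}
```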
@@ -253,8 +344,8 @@
     SBI(M_UCSRxB, M_RXCIEx);
     #if TX_BUFFER_SIZE > 0
       CBI(M_UCSRxB, M_UDRIEx);
-      _written = false;
     #endif
+    _written = false;
   }

   void MarlinSerial::end() {
@@ -281,11 +372,11 @@
   }

   int MarlinSerial::read(void) {
-    int v;

     #if RX_BUFFER_SIZE > 256
-      // Disable RX interrupts to ensure atomic reads
-      const bool isr_enabled = TEST(M_UCSRxB, M_RXCIEx);
+      // Disable RX interrupts to ensure atomic reads - This could reenable TX interrupts,
+      // but this situation is explicitly handled at the TX isr, so no problems there
+      bool isr_enabled = TEST(M_UCSRxB, M_RXCIEx);
       CBI(M_UCSRxB, M_RXCIEx);
     #endif

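The RX_BUFFER_SIZE > 256 guard exists because ring_buffer_pos_t then becomes a 16-bit type, and an 8-bit AVR accesses a 16-bit variable one byte at a time, so the RX interrupt must be masked for the read to be atomic. A rough sketch of the save/disable/restore pattern used here, with hypothetical helpers standing in for the TEST/SBI/CBI register macros:

```cpp
#include <stdint.h>

volatile uint16_t shared_index;    // updated by an ISR
extern bool rx_irq_enabled(void);  // assumed: like TEST(M_UCSRxB, M_RXCIEx)
extern void disable_rx_irq(void);  // assumed: like CBI(M_UCSRxB, M_RXCIEx)
extern void enable_rx_irq(void);   // assumed: like SBI(M_UCSRxB, M_RXCIEx)

uint16_t atomic_read_index(void) {
  const bool was_enabled = rx_irq_enabled(); // save the current state
  disable_rx_irq();                          // begin critical section
  const uint16_t v = shared_index;           // two 8-bit loads, now untearable
  if (was_enabled) enable_rx_irq();          // restore only if it was on
  return v;
}
```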
@@ -298,43 +389,50 @@

     ring_buffer_pos_t t = rx_buffer.tail;

-    if (h == t)
-      v = -1;
-    else {
-      v = rx_buffer.buffer[t];
-      t = (ring_buffer_pos_t)(t + 1) & (RX_BUFFER_SIZE - 1);
-
-      #if RX_BUFFER_SIZE > 256
-        // Disable RX interrupts to ensure atomic write to tail, so
-        // the RX isr can't read partially updated values
-        const bool isr_enabled = TEST(M_UCSRxB, M_RXCIEx);
-        CBI(M_UCSRxB, M_RXCIEx);
-      #endif
+    // If nothing to read, return now
+    if (h == t) return -1;

-      // Advance tail
-      rx_buffer.tail = t;
+    // Get the next char
+    const int v = rx_buffer.buffer[t];
+    t = (ring_buffer_pos_t)(t + 1) & (RX_BUFFER_SIZE - 1);

-      #if RX_BUFFER_SIZE > 256
-        // End critical section
-        if (isr_enabled) SBI(M_UCSRxB, M_RXCIEx);
-      #endif
+    #if RX_BUFFER_SIZE > 256
+      // Disable RX interrupts to ensure atomic write to tail, so
+      // the RX isr can't read partially updated values - This could
+      // reenable TX interrupts, but this situation is explicitly
+      // handled at the TX isr, so no problems there
+      isr_enabled = TEST(M_UCSRxB, M_RXCIEx);
+      CBI(M_UCSRxB, M_RXCIEx);
+    #endif

-      #if ENABLED(SERIAL_XON_XOFF)
-        if ((xon_xoff_state & XON_XOFF_CHAR_MASK) == XOFF_CHAR) {
+    // Advance tail
+    rx_buffer.tail = t;

-          // Get count of bytes in the RX buffer
-          ring_buffer_pos_t rx_count = (ring_buffer_pos_t)(h - t) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+    #if RX_BUFFER_SIZE > 256
+      // End critical section
+      if (isr_enabled) SBI(M_UCSRxB, M_RXCIEx);
+    #endif

-          // When below 10% of RX buffer capacity, send XON before
-          // running out of RX buffer bytes
-          if (rx_count < (RX_BUFFER_SIZE) / 10) {
+    #if ENABLED(SERIAL_XON_XOFF)
+      // If the XOFF char was sent, or about to be sent...
+      if ((xon_xoff_state & XON_XOFF_CHAR_MASK) == XOFF_CHAR) {
+        // Get count of bytes in the RX buffer
+        const ring_buffer_pos_t rx_count = (ring_buffer_pos_t)(h - t) & (ring_buffer_pos_t)(RX_BUFFER_SIZE - 1);
+        if (rx_count < (RX_BUFFER_SIZE) / 10) {
+          #if TX_BUFFER_SIZE > 0
+            // Signal we want an XON character to be sent.
+            xon_xoff_state = XON_CHAR;
+            // Enable TX isr. Non atomic, but it will eventually enable it.
+            SBI(M_UCSRxB, M_UDRIEx);
+          #else
+            // If not using TX interrupts, we must send the XON char now
             xon_xoff_state = XON_CHAR | XON_XOFF_CHAR_SENT;
-            write(XON_CHAR);
-            return v;
-          }
+            while (!TEST(M_UCSRxA, M_UDREx)) sw_barrier();
+            M_UDRx = XON_CHAR;
+          #endif
         }
-      #endif
-    }
+      }
+    #endif

     return v;
   }
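Note the asymmetric thresholds: the RX isr requests XOFF once the buffer reaches 1/8 (12.5%) of capacity, while read() only requests XON again when it drains below 1/10 (10%). The gap provides hysteresis, so the driver does not toggle flow control on every byte. The "325 bytes @ 250kbits/s ... 13mS" figure from the earlier comment also checks out: with 8N1 framing a byte costs 10 bits, so 250 kbit/s moves 25000 bytes/s and 325 bytes of slack lasts 13 ms. A tiny sketch of the arithmetic (128 is an assumed buffer size):

```cpp
#include <cstdio>

int main() {
  constexpr int RX_BUFFER_SIZE = 128;             // assumed, must be 2^n
  constexpr int xoff_at   = RX_BUFFER_SIZE / 8;   // send XOFF at >= 12.5% full
  constexpr int xon_below = RX_BUFFER_SIZE / 10;  // send XON when < 10% full

  // The gap between the two thresholds is the hysteresis band.
  printf("XOFF at %d bytes, XON below %d bytes\n", xoff_at, xon_below);

  // 250 kbit/s with 10 bits per byte (8N1) moves 25000 bytes/s,
  // so 13 ms of host reaction time consumes 25000 * 13 / 1000 bytes.
  printf("13 ms at 250 kbit/s = %d bytes\n", 25000 * 13 / 1000); // 325
  return 0;
}
```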
@@ -367,9 +465,19 @@
     #endif

     #if ENABLED(SERIAL_XON_XOFF)
+      // If the XOFF char was sent, or about to be sent...
       if ((xon_xoff_state & XON_XOFF_CHAR_MASK) == XOFF_CHAR) {
-        xon_xoff_state = XON_CHAR | XON_XOFF_CHAR_SENT;
-        write(XON_CHAR);
+        #if TX_BUFFER_SIZE > 0
+          // Signal we want an XON character to be sent.
+          xon_xoff_state = XON_CHAR;
+          // Enable TX isr. Non atomic, but it will eventually enable it.
+          SBI(M_UCSRxB, M_UDRIEx);
+        #else
+          // If not using TX interrupts, we must send the XON char now
+          xon_xoff_state = XON_CHAR | XON_XOFF_CHAR_SENT;
+          while (!TEST(M_UCSRxA, M_UDREx)) sw_barrier();
+          M_UDRx = XON_CHAR;
+        #endif
       }
     #endif
   }
@@ -383,6 +491,8 @@
       // be done. This shortcut helps significantly improve the
       // effective datarate at high (>500kbit/s) bitrates, where
       // interrupt overhead becomes a slowdown.
+      // Yes, there is a race condition with the sending of the XOFF
+      // char at the RX isr, but it is properly handled there.
       if (!TEST(M_UCSRxB, M_UDRIEx) && TEST(M_UCSRxA, M_UDREx)) {
         M_UDRx = c;

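The shortcut described above (visible in the context lines of this hunk) bypasses the ring buffer entirely when the TX interrupt is disabled and the data register is already empty. A sketch of the pattern, with hypothetical helpers standing in for the real register macros:

```cpp
#include <stdint.h>

// Hypothetical stand-ins for the UART registers/macros used above.
extern volatile uint8_t uart_data_reg; // like M_UDRx
extern bool tx_irq_enabled(void);      // like TEST(M_UCSRxB, M_UDRIEx)
extern bool data_reg_empty(void);      // like TEST(M_UCSRxA, M_UDREx)
extern void enqueue_byte(uint8_t);     // fall back to the ring buffer

void send_byte(uint8_t c) {
  // Fast path: the TX isr is idle (irq off) and the register is free,
  // so write directly and skip the interrupt overhead entirely.
  if (!tx_irq_enabled() && data_reg_empty())
    uart_data_reg = c;
  else
    enqueue_byte(c); // slow path: buffer it and let the TX isr drain it
}
```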
@@ -395,61 +505,79 @@

     const uint8_t i = (tx_buffer.head + 1) & (TX_BUFFER_SIZE - 1);

-    // If the output buffer is full, there's nothing for it other than to
-    // wait for the interrupt handler to empty it a bit
-    while (i == tx_buffer.tail) {
-      if (!ISRS_ENABLED()) {
-        // Interrupts are disabled, so we'll have to poll the data
-        // register empty flag ourselves. If it is set, pretend an
-        // interrupt has happened and call the handler to free up
-        // space for us.
-        if (TEST(M_UCSRxA, M_UDREx))
-          _tx_udr_empty_irq();
-      }
-      // (else , the interrupt handler will free up space for us)
+    // If global interrupts are disabled (as the result of being called from an ISR)...
+    if (!ISRS_ENABLED()) {
+
+      // Make room by polling if it is possible to transmit, and do so!
+      while (i == tx_buffer.tail) {
+
+        // If we can transmit another byte, do it.
+        if (TEST(M_UCSRxA, M_UDREx)) _tx_udr_empty_irq();

-      // Make sure compiler rereads tx_buffer.tail
-      sw_barrier();
+        // Make sure compiler rereads tx_buffer.tail
+        sw_barrier();
+      }
+    }
+    else {
+      // Interrupts are enabled, just wait until there is space
+      while (i == tx_buffer.tail) { sw_barrier(); }
     }

     // Store new char. head is always safe to move
     tx_buffer.buffer[tx_buffer.head] = c;
     tx_buffer.head = i;

-    // Enable TX isr
+    // Enable TX isr - Non atomic, but it will eventually enable TX isr
     SBI(M_UCSRxB, M_UDRIEx);
-    return;
   }

   void MarlinSerial::flushTX(void) {
-    // TX
-    // If we have never written a byte, no need to flush. This special
-    // case is needed since there is no way to force the TXC (transmit
-    // complete) bit to 1 during initialization
-    if (!_written)
-      return;
+    // No bytes written, no need to flush. This special case is needed since there's
+    // no way to force the TXC (transmit complete) bit to 1 during initialization.
+    if (!_written) return;
+
+    // If global interrupts are disabled (as the result of being called from an ISR)...
+    if (!ISRS_ENABLED()) {
+
+      // Wait until everything was transmitted - We must do polling, as interrupts are disabled
+      while (tx_buffer.head != tx_buffer.tail || !TEST(M_UCSRxA, M_TXCx)) {

-    while (TEST(M_UCSRxB, M_UDRIEx) || !TEST(M_UCSRxA, M_TXCx)) {
-      if (!ISRS_ENABLED()) {
-        // Interrupts are globally disabled, but the DR empty
-        // interrupt should be enabled, so poll the DR empty flag to
-        // prevent deadlock
+        // If there is more space, send an extra character
         if (TEST(M_UCSRxA, M_UDREx))
           _tx_udr_empty_irq();
+
+        sw_barrier();
       }
-      sw_barrier();
+
     }
-    // If we get here, nothing is queued anymore (DRIE is disabled) and
+    else {
+      // Wait until everything was transmitted
+      while (tx_buffer.head != tx_buffer.tail || !TEST(M_UCSRxA, M_TXCx)) sw_barrier();
+    }
+
+    // At this point nothing is queued anymore (DRIE is disabled) and
     // the hardware finished transmission (TXC is set).
   }

 #else // TX_BUFFER_SIZE == 0

   void MarlinSerial::write(const uint8_t c) {
+    _written = true;
     while (!TEST(M_UCSRxA, M_UDREx)) sw_barrier();
     M_UDRx = c;
   }

+  void MarlinSerial::flushTX(void) {
+    // No bytes written, no need to flush. This special case is needed since there's
+    // no way to force the TXC (transmit complete) bit to 1 during initialization.
+    if (!_written) return;
+
+    // Wait until everything was transmitted
+    while (!TEST(M_UCSRxA, M_TXCx)) sw_barrier();
+
+    // At this point nothing is queued anymore (DRIE is disabled) and
+    // the hardware finished transmission (TXC is set).
+  }
+
 #endif // TX_BUFFER_SIZE == 0

 /**
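All the busy-wait loops above rely on sw_barrier() so the compiler rereads values that an ISR may have changed behind its back. A plausible definition (Marlin's actual one may differ slightly) is an empty asm statement with a memory clobber:

```cpp
#include <stdint.h>

// Sketch only: a compiler barrier in the style of the sw_barrier() used
// above. The empty asm with a "memory" clobber tells the compiler that
// memory may have changed, forcing it to reload memory operands instead
// of spinning on a stale register copy.
static inline void sw_barrier(void) {
  __asm__ __volatile__("" ::: "memory");
}

// Hypothetical index updated from an ISR (deliberately not volatile here,
// to show the barrier doing the work).
extern uint8_t tx_tail;

void wait_for_space(uint8_t head) {
  while (head == tx_tail) sw_barrier(); // tx_tail is reread every pass
}
```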
@@ -473,13 +601,9 @@
   }

   void MarlinSerial::print(long n, int base) {
-    if (base == 0)
-      write(n);
+    if (base == 0) write(n);
     else if (base == 10) {
-      if (n < 0) {
-        print('-');
-        n = -n;
-      }
+      if (n < 0) { print('-'); n = -n; }
       printNumber(n, 10);
     }
     else