35
35
36
36
#if MICROPY_PY_SELECT

// select.select is implemented on top of the generic poll machinery and is
// incompatible with the POSIX fast path below.
#if MICROPY_PY_SELECT_SELECT && MICROPY_PY_SELECT_POSIX_OPTIMISATIONS
#error "select.select is not supported with MICROPY_PY_SELECT_POSIX_OPTIMISATIONS"
#endif

#if MICROPY_PY_SELECT_POSIX_OPTIMISATIONS

#include <poll.h>

// The POSIX-optimised implementation stores MP_STREAM_POLL_xxx flag values
// directly in struct pollfd events/revents, so the MicroPython stream-poll
// constants must be numerically identical to the system POLL* constants.
#if !((MP_STREAM_POLL_RD) == (POLLIN) && \
    (MP_STREAM_POLL_WR) == (POLLOUT) && \
    (MP_STREAM_POLL_ERR) == (POLLERR) && \
    (MP_STREAM_POLL_HUP) == (POLLHUP) && \
    (MP_STREAM_POLL_NVAL) == (POLLNVAL))
#error "With MICROPY_PY_SELECT_POSIX_OPTIMISATIONS enabled, POLL constants must match"
#endif

#endif
52
+
38
53
// Flags for poll()
39
54
#define FLAG_ONESHOT (1)
40
55
41
56
// A single pollable object.
42
57
typedef struct _poll_obj_t {
43
58
mp_obj_t obj ;
44
59
mp_uint_t (* ioctl )(mp_obj_t obj , mp_uint_t request , uintptr_t arg , int * errcode );
60
+ #if MICROPY_PY_SELECT_POSIX_OPTIMISATIONS
61
+ struct pollfd * pollfd ;
62
+ uint16_t nonsys_flags ;
63
+ uint16_t nonsys_flags_ret ;
64
+ #else
45
65
mp_uint_t flags ;
46
66
mp_uint_t flags_ret
579F
span>;
67
+ #endif
47
68
} poll_obj_t ;
48
69
49
70
// A group of pollable objects.
50
71
typedef struct _poll_group_t {
51
72
// Map containing a dict with key=object to poll, value=its corresponding poll_obj_t.
52
73
mp_map_t map ;
74
+
75
+ #if MICROPY_PY_SELECT_POSIX_OPTIMISATIONS
76
+ // Array of pollfd entries for objects that have a file descriptor.
77
+ unsigned short alloc ;
78
+ unsigned short len ;
79
+ struct pollfd * pollfds ;
80
+ #endif
53
81
} poll_group_t ;
54
82
55
83
// Initialise a poll group with room for n entries in its object map.
STATIC void poll_group_init(poll_group_t *poll_group, size_t n) {
    mp_map_init(&poll_group->map, n);
    #if MICROPY_PY_SELECT_POSIX_OPTIMISATIONS
    // The pollfd array starts out empty and grows on demand.
    poll_group->pollfds = NULL;
    poll_group->len = 0;
    poll_group->alloc = 0;
    #endif
}
58
91
59
92
#if MICROPY_PY_SELECT_SELECT
@@ -62,25 +95,141 @@ STATIC void poll_group_deinit(poll_group_t *poll_group) {
62
95
}
63
96
#endif
64
97
98
+ #if MICROPY_PY_SELECT_POSIX_OPTIMISATIONS
99
+
100
+ STATIC mp_uint_t poll_obj_get_flags (poll_obj_t * poll_obj ) {
101
+ assert (poll_obj -> pollfd == NULL );
102
+ return poll_obj -> nonsys_flags ;
103
+ }
104
+
105
+ STATIC void poll_obj_set_flags (poll_obj_t * poll_obj , mp_uint_t flags ) {
106
+ if (poll_obj -> pollfd != NULL ) {
107
+ poll_obj -> pollfd -> events = flags ;
108
+ } else {
109
+ poll_obj -> nonsys_flags = flags ;
110
+ }
111
+ }
112
+
113
+ STATIC mp_uint_t poll_obj_get_flags_ret (poll_obj_t * poll_obj ) {
114
+ if (poll_obj -> pollfd != NULL ) {
115
+ return poll_obj -> pollfd -> revents ;
116
+ } else {
117
+ return poll_obj -> nonsys_flags_ret ;
118
+ }
119
+ }
120
+
121
+ STATIC void poll_obj_set_flags_ret (poll_obj_t * poll_obj , mp_uint_t flags_ret ) {
122
+ if (poll_obj -> pollfd != NULL ) {
123
+ poll_obj -> pollfd -> revents = flags_ret ;
124
+ } else {
125
+ poll_obj -> nonsys_flags_ret = flags_ret ;
126
+ }
127
+ }
128
+
129
+ STATIC struct pollfd * poll_group_add_fd (poll_group_t * poll_group , int fd ) {
130
+ struct pollfd * free_slot = NULL ;
131
+ for (unsigned int i = 0 ; i < poll_group -> len ; ++ i ) {
132
+ struct pollfd * slot = & poll_group -> pollfds [i ];
133
+ if (slot -> fd == -1 ) {
134
+ free_slot = slot ;
135
+ break ;
136
+ }
137
+ }
138
+
139
+ if (free_slot == NULL ) {
140
+ if (poll_group -> len >= poll_group -> alloc ) {
141
+ poll_group -> pollfds = m_renew (struct pollfd , poll_group -> pollfds , poll_group -> alloc , poll_group -> alloc + 4 );
142
+ poll_group -> alloc += 4 ;
143
+ }
144
+ free_slot = & poll_group -> pollfds [poll_group -> len ++ ];
145
+ }
146
+
147
+ free_slot -> fd = fd ;
148
+
149
+ return free_slot ;
150
+ }
151
+
152
+ static inline bool poll_group_all_are_fds (poll_group_t * poll_group ) {
153
+ return poll_group -> map .used == poll_group -> len ;
154
+ }
155
+
156
+ #else
157
+
158
+ static inline mp_uint_t poll_obj_get_flags (poll_obj_t * poll_obj ) {
159
+ return poll_obj -> flags ;
160
+ }
161
+
162
+ static inline void poll_obj_set_flags (poll_obj_t * poll_obj , mp_uint_t flags ) {
163
+ poll_obj -> flags = flags ;
164
+ }
165
+
166
+ static inline mp_uint_t poll_obj_get_flags_ret (poll_obj_t * poll_obj ) {
167
+ return poll_obj -> flags_ret ;
168
+ }
169
+
170
+ static inline void poll_obj_set_flags_ret (poll_obj_t * poll_obj , mp_uint_t flags_ret ) {
171
+ poll_obj -> flags_ret = flags_ret ;
172
+ }
173
+
174
+ #endif
175
+
65
176
// Add each of obj[0..obj_len-1] to the poll group with the given flags.
// If an object is already registered its flags are updated: OR-ed with the
// existing flags when or_flags is true, otherwise replaced.
STATIC void poll_group_add(poll_group_t *poll_group, const mp_obj_t *obj, mp_uint_t obj_len, mp_uint_t flags, bool or_flags) {
    for (mp_uint_t i = 0; i < obj_len; i++) {
        mp_map_elem_t *elem = mp_map_lookup(&poll_group->map, mp_obj_id(obj[i]), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
        if (elem->value == MP_OBJ_NULL) {
            // object not found; get its ioctl and add it to the poll list

            // If an exception is raised below when adding the new object then the map entry for that
            // object remains unpopulated, and methods like poll() may crash.  This case is not handled.

            poll_obj_t *poll_obj = m_new_obj(poll_obj_t);
            poll_obj->obj = obj[i];

            #if MICROPY_PY_SELECT_POSIX_OPTIMISATIONS
            int fd = -1;
            if (mp_obj_is_int(obj[i])) {
                // A file descriptor integer passed in as the object, so use it directly.
                fd = mp_obj_get_int(obj[i]);
                if (fd < 0) {
                    mp_raise_ValueError(NULL);
                }
                poll_obj->ioctl = NULL;
            } else {
                // An object passed in.  Check if it has a file descriptor.
                const mp_stream_p_t *stream_p = mp_get_stream_raise(obj[i], MP_STREAM_OP_IOCTL);
                poll_obj->ioctl = stream_p->ioctl;
                int err;
                mp_uint_t res = stream_p->ioctl(obj[i], MP_STREAM_GET_FILENO, 0, &err);
                if (res != MP_STREAM_ERROR) {
                    fd = res;
                }
            }
            if (fd >= 0) {
                // Object has a file descriptor so add it to pollfds and let the
                // system poll() service it.
                poll_obj->pollfd = poll_group_add_fd(poll_group, fd);
            } else {
                // Object doesn't have a file descriptor; it will be polled via its ioctl.
                poll_obj->pollfd = NULL;
            }
            #else
            const mp_stream_p_t *stream_p = mp_get_stream_raise(obj[i], MP_STREAM_OP_IOCTL);
            poll_obj->ioctl = stream_p->ioctl;
            #endif

            poll_obj_set_flags(poll_obj, flags);
            poll_obj_set_flags_ret(poll_obj, 0);
            elem->value = MP_OBJ_FROM_PTR(poll_obj);
        } else {
            // object exists; update its flags
            poll_obj_t *poll_obj = (poll_obj_t *)MP_OBJ_TO_PTR(elem->value);
            // Use a per-iteration local so that OR-ing in this object's existing
            // flags does not mutate the `flags` parameter and leak into the
            // flags applied to subsequent objects in this call (bug fix).
            mp_uint_t new_flags = flags;
            #if MICROPY_PY_SELECT_SELECT
            if (or_flags) {
                new_flags |= poll_obj_get_flags(poll_obj);
            }
            #else
            (void)or_flags;
            #endif
            poll_obj_set_flags(poll_obj, new_flags);
        }
    }
}
@@ -94,9 +243,17 @@ STATIC mp_uint_t poll_group_poll_single_round(poll_group_t *poll_group, size_t *
94
243
}
95
244
96
245
poll_obj_t * poll_obj = MP_OBJ_TO_PTR (poll_group -> map.table [i ].value );
246
+
247
+ #if MICROPY_PY_SELECT_POSIX_OPTIMISATIONS
248
+ if (poll_obj -> pollfd != NULL ) {
249
+ // Object has file descriptor so will be polled separately by poll().
250
+ continue ;
251
+ }
252
+ #endif
253
+
97
254
int errcode ;
98
- mp_int_t ret = poll_obj -> ioctl (poll_obj -> obj , MP_STREAM_POLL , poll_obj -> flags , & errcode );
99
- poll_obj -> flags_ret = ret ;
255
+ mp_int_t ret = poll_obj -> ioctl (poll_obj -> obj , MP_STREAM_POLL , poll_obj_get_flags ( poll_obj ) , & errcode );
256
+ poll_obj_set_flags_ret ( poll_obj , ret ) ;
100
257
101
258
if (ret == -1 ) {
102
259
// error doing ioctl
@@ -106,6 +263,7 @@ STATIC mp_uint_t poll_group_poll_single_round(poll_group_t *poll_group, size_t *
106
263
if (ret != 0 ) {
107
264
// object is ready
108
265
n_ready += 1 ;
266
+ #if MICROPY_PY_SELECT_SELECT
109
267
if (rwx_num != NULL ) {
110
268
if (ret & MP_STREAM_POLL_RD ) {
111
269
rwx_num [0 ] += 1 ;
@@ -117,21 +275,75 @@ STATIC mp_uint_t poll_group_poll_single_round(poll_group_t *poll_group, size_t *
117
275
rwx_num [2 ] += 1 ;
118
276
}
119
277
}
278
+ #else
279
+ (void )rwx_num ;
280
+ #endif
120
281
}
121
282
}
122
283
return n_ready ;
123
284
}
124
285
125
286
// Poll every object in the group, returning how many are ready.  Blocks
// until at least one object is ready or the timeout (milliseconds) expires;
// timeout == (mp_uint_t)-1 means wait forever.  rwx_num, when non-NULL, is
// filled in by poll_group_poll_single_round for the select.select case.
STATIC mp_uint_t poll_group_poll_all(poll_group_t *poll_group, size_t *rwx_num, mp_uint_t timeout) {
    mp_uint_t start_ticks = mp_hal_ticks_ms();

    #if MICROPY_PY_SELECT_POSIX_OPTIMISATIONS

    for (;;) {
        MP_THREAD_GIL_EXIT();

        // Choose the timeout for the system poll():
        //  - if some objects must be polled via ioctl, wake every 1ms so
        //    they can be serviced;
        //  - otherwise sleep in poll() for the remaining time.
        int t = 1;
        if (poll_group_all_are_fds(poll_group)) {
            if (timeout == (mp_uint_t)-1) {
                t = -1; // block indefinitely
            } else {
                mp_uint_t elapsed = mp_hal_ticks_ms() - start_ticks;
                // NOTE(review): the remaining time is truncated to int —
                // confirm callers keep timeouts within INT_MAX milliseconds.
                t = elapsed >= timeout ? 0 : (int)(timeout - elapsed);
            }
        }

        // Call system poll for those objects that have a file descriptor.
        int n_ready = poll(poll_group->pollfds, poll_group->len, t);

        MP_THREAD_GIL_ENTER();

        if (n_ready == -1) {
            int err = errno;
            if (err != EINTR) {
                mp_raise_OSError(err);
            }
            // Interrupted by a signal: treat as nothing ready and retry.
            n_ready = 0;
        }

        // Explicitly poll any objects that do not have a file descriptor.
        if (!poll_group_all_are_fds(poll_group)) {
            n_ready += poll_group_poll_single_round(poll_group, rwx_num);
        }

        // Return if an object is ready, or if the timeout expired.
        if (n_ready > 0 || (timeout != (mp_uint_t)-1 && mp_hal_ticks_ms() - start_ticks >= timeout)) {
            return n_ready;
        }

        mp_handle_pending(true);
    }

    #else

    for (;;) {
        // Poll the objects via their ioctl methods.
        mp_uint_t n_ready = poll_group_poll_single_round(poll_group, rwx_num);
        if (n_ready > 0 || (timeout != (mp_uint_t)-1 && mp_hal_ticks_ms() - start_ticks >= timeout)) {
            return n_ready;
        }
        MICROPY_EVENT_POLL_HOOK
    }

    #endif
}
136
348
137
349
#if MICROPY_PY_SELECT_SELECT
@@ -224,7 +436,20 @@ MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(poll_register_obj, 2, 3, poll_register);
224
436
// unregister(obj)
225
437
STATIC mp_obj_t poll_unregister (mp_obj_t self_in , mp_obj_t obj_in ) {
226
438
mp_obj_poll_t * self = MP_OBJ_TO_PTR (self_in );
227
- mp_map_lookup (& self -> poll_group .map , mp_obj_id (obj_in ), MP_MAP_LOOKUP_REMOVE_IF_FOUND );
439
+ mp_map_elem_t * elem = mp_map_lookup (& self -> poll_group .map , mp_obj_id (obj_in ), MP_MAP_LOOKUP_REMOVE_IF_FOUND );
440
+
441
+ #if MICROPY_PY_SELECT_POSIX_OPTIMISATIONS
442
+ if (elem != NULL ) {
443
+ poll_obj_t * poll_obj = (poll_obj_t * )MP_OBJ_TO_PTR (elem -> value );
444
+ if (poll_obj -> pollfd != NULL ) {
445
+ poll_obj -> pollfd -> fd = -1 ;
446
+ }
447
+ elem -> value = MP_OBJ_NULL ;
448
+ }
449
+ #else
450
+ (void )elem ;
451
+ #endif
452
+
228
453
// TODO raise KeyError if obj didn't exist in map
229
454
return mp_const_none ;
230
455
}
@@ -237,7 +462,7 @@ STATIC mp_obj_t poll_modify(mp_obj_t self_in, mp_obj_t obj_in, mp_obj_t eventmas
237
462
if (elem == NULL ) {
238
463
mp_raise_OSError (MP_ENOENT );
239
464
}
240
- ((poll_obj_t * )MP_OBJ_TO_PTR (elem -> value )) -> flags = mp_obj_get_int (eventmask_in );
465
+ poll_obj_set_flags ((poll_obj_t * )MP_OBJ_TO_PTR (elem -> value ), mp_obj_get_int (eventmask_in ) );
241
466
return mp_const_none ;
242
467
}
243
468
MP_DEFINE_CONST_FUN_OBJ_3 (poll_modify_obj , poll_modify );
@@ -277,12 +502,12 @@ STATIC mp_obj_t poll_poll(size_t n_args, const mp_obj_t *args) {
277
502
continue ;
278
503
}
279
504
poll_obj_t * poll_obj = MP_OBJ_TO_PTR (self -> poll_group .map .table [i ].value );
280
- if (poll_obj -> flags_ret != 0 ) {
281
- mp_obj_t tuple [2 ] = {poll_obj -> obj , MP_OBJ_NEW_SMALL_INT (poll_obj -> flags_ret )};
505
+ if (poll_obj_get_flags_ret ( poll_obj ) != 0 ) {
506
+ mp_obj_t tuple [2 ] = {poll_obj -> obj , MP_OBJ_NEW_SMALL_INT (poll_obj_get_flags_ret ( poll_obj ) )};
282
507
ret_list -> items [n_ready ++ ] = mp_obj_new_tuple (2 , tuple );
283
508
if (self -> flags & FLAG_ONESHOT ) {
284
509
// Don't poll next time, until new event flags will be set explicitly
285
- poll_obj -> flags = 0 ;
510
+ poll_obj_set_flags ( poll_obj , 0 ) ;
286
511
}
287
512
}
288
513
}
@@ -320,13 +545,13 @@ STATIC mp_obj_t poll_iternext(mp_obj_t self_in) {
320
545
continue ;
321
546
}
322
547
poll_obj_t * poll_obj = MP_OBJ_TO_PTR (self -> poll_group .map .table [i ].value );
323
- if (poll_obj -> flags_ret != 0 ) {
548
+ if (poll_obj_get_flags_ret ( poll_obj ) != 0 ) {
324
549
mp_obj_tuple_t * t = MP_OBJ_TO_PTR (self -> ret_tuple );
325
550
t -> items [0 ] = poll_obj -> obj ;
326
- t -> items [1 ] = MP_OBJ_NEW_SMALL_INT (poll_obj -> flags_ret );
551
+ t -> items [1 ] = MP_OBJ_NEW_SMALL_INT (poll_obj_get_flags_ret ( poll_obj ) );
327
552
if (self -> flags & FLAG_ONESHOT ) {
328
553
// Don't poll next time, until new event flags will be set explicitly
329
- poll_obj -> flags = 0 ;
554
+ poll_obj_set_flags ( poll_obj , 0 ) ;
330
555
}
331
556
return MP_OBJ_FROM_PTR (t );
332
557
}
0 commit comments