1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_V8_PLATFORM_H_
6#define V8_V8_PLATFORM_H_
7
8#include <stddef.h>
9#include <stdint.h>
10#include <stdlib.h> // For abort.
11#include <memory>
12#include <string>
13
14#include "v8config.h" // NOLINT(build/include)
15
16namespace v8 {
17
18class Isolate;
19
/**
 * A Task represents a unit of work.
 */
class Task {
 public:
  virtual ~Task() = default;

  /**
   * Performs the unit of work. Ownership/lifetime of the task is defined by
   * the scheduling API it was handed to (e.g. TaskRunner::PostTask takes
   * ownership via std::unique_ptr<Task>).
   */
  virtual void Run() = 0;
};
29
/**
 * An IdleTask represents a unit of work to be performed in idle time.
 * The Run method is invoked with an argument that specifies the deadline in
 * seconds returned by MonotonicallyIncreasingTime().
 * The idle task is expected to complete by this deadline.
 */
class IdleTask {
 public:
  virtual ~IdleTask() = default;

  /**
   * |deadline_in_seconds| is an absolute time in the timebase of
   * Platform::MonotonicallyIncreasingTime(); the task is expected to finish
   * before that time passes.
   */
  virtual void Run(double deadline_in_seconds) = 0;
};
41
42/**
43 * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
44 * post tasks after the isolate gets destructed, but these tasks may not get
45 * executed anymore. All tasks posted to a given TaskRunner will be invoked in
46 * sequence. Tasks can be posted from any thread.
47 */
48class TaskRunner {
49 public:
50 /**
51 * Schedules a task to be invoked by this TaskRunner. The TaskRunner
52 * implementation takes ownership of |task|.
53 */
54 virtual void PostTask(std::unique_ptr<Task> task) = 0;
55
56 /**
57 * Schedules a task to be invoked by this TaskRunner. The TaskRunner
58 * implementation takes ownership of |task|. The |task| cannot be nested
59 * within other task executions.
60 *
61 * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
62 */
63 virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}
64
65 /**
66 * Schedules a task to be invoked by this TaskRunner. The task is scheduled
67 * after the given number of seconds |delay_in_seconds|. The TaskRunner
68 * implementation takes ownership of |task|.
69 */
70 virtual void PostDelayedTask(std::unique_ptr<Task> task,
71 double delay_in_seconds) = 0;
72
73 /**
74 * Schedules a task to be invoked by this TaskRunner. The task is scheduled
75 * after the given number of seconds |delay_in_seconds|. The TaskRunner
76 * implementation takes ownership of |task|. The |task| cannot be nested
77 * within other task executions.
78 *
79 * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
80 */
81 virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
82 double delay_in_seconds) {}
83
84 /**
85 * Schedules an idle task to be invoked by this TaskRunner. The task is
86 * scheduled when the embedder is idle. Requires that
87 * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
88 * relative to other task types and may be starved for an arbitrarily long
89 * time if no idle time is available. The TaskRunner implementation takes
90 * ownership of |task|.
91 */
92 virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;
93
94 /**
95 * Returns true if idle tasks are enabled for this TaskRunner.
96 */
97 virtual bool IdleTasksEnabled() = 0;
98
99 /**
100 * Returns true if non-nestable tasks are enabled for this TaskRunner.
101 */
102 virtual bool NonNestableTasksEnabled() const { return false; }
103
104 /**
105 * Returns true if non-nestable delayed tasks are enabled for this TaskRunner.
106 */
107 virtual bool NonNestableDelayedTasksEnabled() const { return false; }
108
109 TaskRunner() = default;
110 virtual ~TaskRunner() = default;
111
112 private:
113 TaskRunner(const TaskRunner&) = delete;
114 TaskRunner& operator=(const TaskRunner&) = delete;
115};
116
/**
 * The interface represents complex arguments to trace events.
 * Instances are handed to the tracing system through the |arg_convertables|
 * parameter of TracingController::AddTraceEvent*.
 */
class ConvertableToTraceFormat {
 public:
  virtual ~ConvertableToTraceFormat() = default;

  /**
   * Append the class info to the provided |out| string. The appended
   * data must be a valid JSON object. Strings must be properly quoted, and
   * escaped. There is no processing applied to the content after it is
   * appended.
   */
  virtual void AppendAsTraceFormat(std::string* out) const = 0;
};
132
/**
 * V8 Tracing controller.
 *
 * Can be implemented by an embedder to record trace events from V8.
 * All virtual methods have no-op default implementations, so an embedder that
 * does not support tracing can use this class as-is.
 */
class TracingController {
 public:
  virtual ~TracingController() = default;

  /**
   * Called by TRACE_EVENT* macros, don't call this directly.
   * The name parameter is a category group for example:
   * TRACE_EVENT0("v8,parse", "V8.Parse")
   * The pointer returned points to a value with zero or more of the bits
   * defined in CategoryGroupEnabledFlags.
   * The default implementation returns a pointer to a static zero byte, i.e.
   * every category group reads as disabled.
   */
  virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
    static uint8_t no = 0;
    return &no;
  }

  /**
   * Adds a trace event to the platform tracing system. These function calls are
   * usually the result of a TRACE_* macro from trace_event_common.h when
   * tracing and the category of the particular trace are enabled. It is not
   * advisable to call these functions on their own; they are really only meant
   * to be used by the trace macros. The returned handle can be used by
   * UpdateTraceEventDuration to update the duration of COMPLETE events.
   * The default implementations discard the event and return a zero handle.
   */
  virtual uint64_t AddTraceEvent(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags) {
    return 0;
  }
  virtual uint64_t AddTraceEventWithTimestamp(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags, int64_t timestamp) {
    return 0;
  }

  /**
   * Sets the duration field of a COMPLETE trace event. It must be called with
   * the handle returned from AddTraceEvent().
   */
  virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                        const char* name, uint64_t handle) {}

  /**
   * Observer interface for being notified when tracing is turned on or off.
   */
  class TraceStateObserver {
   public:
    virtual ~TraceStateObserver() = default;
    virtual void OnTraceEnabled() = 0;
    virtual void OnTraceDisabled() = 0;
  };

  /** Adds tracing state change observer. */
  virtual void AddTraceStateObserver(TraceStateObserver*) {}

  /** Removes tracing state change observer. */
  virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
};
201
/**
 * A V8 memory page allocator.
 *
 * Can be implemented by an embedder to manage large host OS allocations.
 */
class PageAllocator {
 public:
  virtual ~PageAllocator() = default;

  /**
   * Gets the page granularity for AllocatePages and FreePages. Addresses and
   * lengths for those calls should be multiples of AllocatePageSize().
   */
  virtual size_t AllocatePageSize() = 0;

  /**
   * Gets the page granularity for SetPermissions and ReleasePages. Addresses
   * and lengths for those calls should be multiples of CommitPageSize().
   */
  virtual size_t CommitPageSize() = 0;

  /**
   * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
   * sequences of random mmap addresses.
   */
  virtual void SetRandomMmapSeed(int64_t seed) = 0;

  /**
   * Returns a randomized address, suitable for memory allocation under ASLR.
   * The address will be aligned to AllocatePageSize.
   */
  virtual void* GetRandomMmapAddr() = 0;

  /**
   * Memory permissions.
   */
  enum Permission {
    kNoAccess,
    kRead,
    kReadWrite,
    // TODO(hpayer): Remove this flag. Memory should never be rwx.
    kReadWriteExecute,
    kReadExecute
  };

  /**
   * Allocates memory in range with the given alignment and permission.
   * NOTE(review): |address| appears to be a placement hint (e.g. a value from
   * GetRandomMmapAddr()) rather than a hard requirement -- confirm against
   * the embedder implementations.
   */
  virtual void* AllocatePages(void* address, size_t length, size_t alignment,
                              Permission permissions) = 0;

  /**
   * Frees memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool FreePages(void* address, size_t length) = 0;

  /**
   * Releases memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool ReleasePages(void* address, size_t length,
                            size_t new_length) = 0;

  /**
   * Sets permissions on pages in an allocated range.
   */
  virtual bool SetPermissions(void* address, size_t length,
                              Permission permissions) = 0;

  /**
   * Frees memory in the given [address, address + size) range. address and size
   * should be operating system page-aligned. The next write to this
   * memory area brings the memory transparently back.
   * The default implementation is a no-op that reports success.
   */
  virtual bool DiscardSystemPages(void* address, size_t size) { return true; }
};
277
/**
 * V8 Platform abstraction layer.
 *
 * The embedder has to provide an implementation of this interface before
 * initializing the rest of V8.
 */
class Platform {
 public:
  virtual ~Platform() = default;

  /**
   * Allows the embedder to manage memory page allocations.
   */
  virtual PageAllocator* GetPageAllocator() {
    // TODO(bbudge) Make this abstract after all embedders implement this.
    return nullptr;
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * blocks of memory. V8 retries the failed allocation once after calling this
   * method. On success, execution continues; otherwise V8 exits with a fatal
   * error.
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual void OnCriticalMemoryPressure() {
    // TODO(bbudge) Remove this when embedders override the following method.
    // See crbug.com/634547.
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * memory regions. The |length| parameter is the amount of memory needed.
   * Returns true if memory is now available. Returns false if no memory could
   * be made available. V8 will retry allocations until this method returns
   * false.
   *
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual bool OnCriticalMemoryPressure(size_t length) { return false; }

  /**
   * Gets the number of worker threads used by
   * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the number
   * of tasks a work package should be split into. A return value of 0 means
   * that there are no worker threads available. Note that a value of 0 won't
   * prohibit V8 from posting tasks using |CallOnWorkerThread|.
   */
  virtual int NumberOfWorkerThreads() = 0;

  /**
   * Returns a TaskRunner which can be used to post a task on the foreground.
   * This function should only be called from a foreground thread.
   */
  virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
      Isolate* isolate) = 0;

  /**
   * Schedules a task to be invoked on a worker thread.
   */
  virtual void CallOnWorkerThread(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task that blocks the main thread to be invoked with
   * high-priority on a worker thread.
   */
  virtual void CallBlockingTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a high
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked with low-priority on a worker thread.
   */
  virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a low
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
   * expires.
   */
  virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
                                         double delay_in_seconds) = 0;

  /**
   * Schedules a task to be invoked on a foreground thread wrt a specific
   * |isolate|. Tasks posted for the same isolate should be executed in order
   * of scheduling. The definition of "foreground" is opaque to V8.
   */
  V8_DEPRECATE_SOON(
      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
      virtual void CallOnForegroundThread(Isolate* isolate, Task* task)) = 0;

  /**
   * Schedules a task to be invoked on a foreground thread wrt a specific
   * |isolate| after the given number of seconds |delay_in_seconds|.
   * Tasks posted for the same isolate should be executed in order of
   * scheduling. The definition of "foreground" is opaque to V8.
   */
  V8_DEPRECATE_SOON(
      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
      virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
                                                 double delay_in_seconds)) = 0;

  /**
   * Schedules a task to be invoked on a foreground thread wrt a specific
   * |isolate| when the embedder is idle.
   * Requires that IdleTasksEnabled(isolate) is true.
   * Idle tasks may be reordered relative to other task types and may be
   * starved for an arbitrarily long time if no idle time is available.
   * The definition of "foreground" is opaque to V8.
   */
  V8_DEPRECATE_SOON(
      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
      virtual void CallIdleOnForegroundThread(Isolate* isolate,
                                              IdleTask* task)) {
    // This must be overridden if |IdleTasksEnabled()|.
    abort();
  }

  /**
   * Returns true if idle tasks are enabled for the given |isolate|.
   * The default implementation reports false (no idle-task support).
   */
  virtual bool IdleTasksEnabled(Isolate* isolate) {
    return false;
  }

  /**
   * Monotonically increasing time in seconds from an arbitrary fixed point in
   * the past. This function is expected to return at least
   * millisecond-precision values. For this reason,
   * it is recommended that the fixed point be no further in the past than
   * the epoch.
   */
  virtual double MonotonicallyIncreasingTime() = 0;

  /**
   * Current wall-clock time in milliseconds since epoch.
   * This function is expected to return at least millisecond-precision values.
   */
  virtual double CurrentClockTimeMillis() = 0;

  typedef void (*StackTracePrinter)();

  /**
   * Returns a function pointer that prints a stack trace of the current stack
   * on invocation. Returning nullptr (the default) disables stack trace
   * printing.
   */
  virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }

  /**
   * Returns an instance of a v8::TracingController. This must be non-nullptr.
   */
  virtual TracingController* GetTracingController() = 0;

  /**
   * Tells the embedder to generate and upload a crashdump during an unexpected
   * but non-critical scenario.
   * The default implementation is a no-op.
   */
  virtual void DumpWithoutCrashing() {}

 protected:
  /**
   * Default implementation of current wall-clock time in milliseconds
   * since epoch. Useful for implementing |CurrentClockTimeMillis| if
   * nothing special needed.
   */
  V8_EXPORT static double SystemClockTimeMillis();
};
451
452} // namespace v8
453
454#endif // V8_V8_PLATFORM_H_
455