

diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index a8669330b456..351e801cead9 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -3319,6 +3319,12 @@ int num_ioctls;</synopsis>
 !Pdrivers/gpu/drm/i915/intel_csr.c csr support for dmc
 !Idrivers/gpu/drm/i915/intel_csr.c
       </sect2>
+      <sect2>
+        <title>Video BIOS Table (VBT)</title>
+!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
+!Idrivers/gpu/drm/i915/intel_bios.c
+!Idrivers/gpu/drm/i915/intel_bios.h
+      </sect2>
     </sect1>
 
     <sect1>

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0fc38bb7276c..c5db23511184 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1331,7 +1331,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	struct intel_engine_cs *ring;
 	u64 acthd[I915_NUM_RINGS];
 	u32 seqno[I915_NUM_RINGS];
-	int i;
+	u32 instdone[I915_NUM_INSTDONE_REG];
+	int i, j;
 
 	if (!i915.enable_hangcheck) {
 		seq_printf(m, "Hangcheck disabled\n");
@@ -1345,6 +1346,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 		acthd[i] = intel_ring_get_active_head(ring);
 	}
 
+	i915_get_extra_instdone(dev, instdone);
+
 	intel_runtime_pm_put(dev_priv);
 
 	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
@@ -1365,6 +1368,21 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 			   (long long)ring->hangcheck.max_acthd);
 		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
 		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+
+		if (ring->id == RCS) {
+			seq_puts(m, "\tinstdone read =");
+
+			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
+				seq_printf(m, " 0x%08x", instdone[j]);
+
+			seq_puts(m, "\n\tinstdone accu =");
+
+			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
+				seq_printf(m, " 0x%08x",
+					   ring->hangcheck.instdone[j]);
+
+			seq_puts(m, "\n");
+		}
 	}
 
 	return 0;
@@ -1942,11 +1960,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		seq_puts(m, "HW context ");
 		describe_ctx(m, ctx);
-		for_each_ring(ring, dev_priv, i) {
-			if (ring->default_context == ctx)
-				seq_printf(m, "(default context %s) ",
-					   ring->name);
-		}
+		if (ctx == dev_priv->kernel_context)
+			seq_printf(m, "(kernel context) ");
 
 		if (i915.enable_execlists) {
 			seq_putc(m, '\n');
@@ -1976,12 +1991,13 @@ static int i915_context_status(struct seq_file *m, void *unused)
 }
 
 static void i915_dump_lrc_obj(struct seq_file *m,
-			      struct intel_engine_cs *ring,
-			      struct drm_i915_gem_object *ctx_obj)
+			      struct intel_context *ctx,
+			      struct intel_engine_cs *ring)
 {
 	struct page *page;
 	uint32_t *reg_state;
 	int j;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 	unsigned long ggtt_offset = 0;
 
 	if (ctx_obj == NULL) {
@@ -1991,7 +2007,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 	}
 
 	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-		   intel_execlists_ctx_id(ctx_obj));
+		   intel_execlists_ctx_id(ctx, ring));
 
 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
 		seq_puts(m, "\tNot bound in GGTT\n");
@@ -2037,13 +2053,10 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 	if (ret)
 		return ret;
 
-	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		for_each_ring(ring, dev_priv, i) {
-			if (ring->default_context != ctx)
-				i915_dump_lrc_obj(m, ring,
-						  ctx->engine[i].state);
-		}
-	}
+	list_for_each_entry(ctx, &dev_priv->context_list, link)
+		if (ctx != dev_priv->kernel_context)
+			for_each_ring(ring, dev_priv, i)
+				i915_dump_lrc_obj(m, ctx, ring);
 
 	mutex_unlock(&dev->struct_mutex);
@@ -2092,13 +2105,13 @@ static int i915_execlists(struct seq_file *m, void *data)
 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
 
 		read_pointer = ring->next_context_status_buffer;
-		write_pointer = status_pointer & 0x07;
+		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
 		if (read_pointer > write_pointer)
-			write_pointer += 6;
+			write_pointer += GEN8_CSB_ENTRIES;
 		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
 			   read_pointer, write_pointer);
 
-		for (i = 0; i < 6; i++) {
+		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
 			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
 			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
@@ -2115,11 +2128,8 @@ static int i915_execlists(struct seq_file *m, void *data)
 		seq_printf(m, "\t%d requests in queue\n", count);
 		if (head_req) {
-			struct drm_i915_gem_object *ctx_obj;
-
-			ctx_obj = head_req->ctx->engine[ring_id].state;
 			seq_printf(m, "\tHead request id: %u\n",
-				   intel_execlists_ctx_id(ctx_obj));
+				   intel_execlists_ctx_id(head_req->ctx, ring));
 			seq_printf(m, "\tHead request tail: %u\n",
 				   head_req->tail);
 		}

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f357058c74d9..11d8414edbbe 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1079,7 +1079,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
 	 */
 	broxton_init_cdclk(dev);
 	broxton_ddi_phy_init(dev);
-	intel_prepare_ddi(dev);
 
 	return 0;
 }
@@ -1338,8 +1337,8 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
 		return 0;
 
 	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
-		      wait_for_on ? "on" : "off",
-		      I915_READ(VLV_GTLC_PW_STATUS));
+		      onoff(wait_for_on),
+		      I915_READ(VLV_GTLC_PW_STATUS));
 
 	/*
 	 * RC6 transitioning can be delayed up to 2 msec (see
@@ -1348,7 +1347,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
 	err = wait_for(COND, 3);
 	if (err)
 		DRM_ERROR("timeout waiting for GT wells to go %s\n",
-			  wait_for_on ? "on" : "off");
+			  onoff(wait_for_on));
 
 	return err;
 #undef COND
@@ -1359,7 +1358,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
 	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
 		return;
 
-	DRM_ERROR("GT register access while GT waking disabled\n");
+	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
 	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
 }
@@ -1503,6 +1502,10 @@ static int intel_runtime_suspend(struct device *device)
 	enable_rpm_wakeref_asserts(dev_priv);
 	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+
+	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
+		DRM_ERROR("Unclaimed access detected prior to suspending\n");
+
 	dev_priv->pm.suspended = true;
 
 	/*
@@ -1551,6 +1554,8 @@ static int intel_runtime_resume(struct device *device)
 	intel_opregion_notify_adapter(dev, PCI_D0);
 	dev_priv->pm.suspended = false;
+	if (intel_uncore_unclaimed_mmio(dev_priv))
+		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
 	intel_guc_resume(dev);

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f0f75d7c0d94..afb0beee9975 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -34,6 +34,7 @@
 #include <uapi/drm/drm_fourcc.h>
 
 #include <drm/drmP.h>
+#include "i915_params.h"
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
@@ -58,7 +59,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20151218"
+#define DRIVER_DATE		"20160124"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -69,11 +70,11 @@
 	BUILD_BUG_ON(__i915_warn_cond); \
 	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
 #else
-#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
+#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 #endif
 
 #undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
 
 #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
 			     (long) (x), __func__);
@@ -87,31 +88,25 @@
  */
 #define I915_STATE_WARN(condition, format...) ({			\
 	int __ret_warn_on = !!(condition);				\
-	if (unlikely(__ret_warn_on)) {					\
-		if (i915.verbose_state_checks)				\
-			WARN(1, format);				\
-		else							\
+	if (unlikely(__ret_warn_on))					\
+		if (!WARN(i915.verbose_state_checks, format))		\
 			DRM_ERROR(format);				\
-	}								\
 	unlikely(__ret_warn_on);					\
 })
 
-#define I915_STATE_WARN_ON(condition) ({				\
-	int __ret_warn_on = !!(condition);				\
-	if (unlikely(__ret_warn_on)) {					\
-		if (i915.verbose_state_checks)				\
-			WARN(1, "WARN_ON(" #condition ")\n");		\
-		else							\
-			DRM_ERROR("WARN_ON(" #condition ")\n");		\
-	}								\
-	unlikely(__ret_warn_on);					\
-})
+#define I915_STATE_WARN_ON(x)						\
+	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 
 static inline const char *yesno(bool v)
 {
 	return v ? "yes" : "no";
 }
 
+static inline const char *onoff(bool v)
+{
+	return v ? "on" : "off";
+}
+
 enum pipe {
 	INVALID_PIPE = -1,
 	PIPE_A = 0,
@@ -339,7 +334,7 @@ struct drm_i915_file_private {
 		unsigned boosts;
 	} rps;
 
-	struct intel_engine_cs *bsd_ring;
+	unsigned int bsd_ring;
 };
 
 enum intel_dpll_id {
@@ -633,6 +628,7 @@ struct drm_i915_display_funcs {
 			  struct dpll *best_clock);
 	int (*compute_pipe_wm)(struct intel_crtc *crtc,
 			       struct drm_atomic_state *state);
+	void (*program_watermarks)(struct intel_crtc_state *cstate);
 	void (*update_wm)(struct drm_crtc *crtc);
 	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
 	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -657,9 +653,6 @@ struct drm_i915_display_funcs {
 			  struct drm_i915_gem_object *obj,
 			  struct drm_i915_gem_request *req,
 			  uint32_t flags);
-	void (*update_primary_plane)(struct drm_crtc *crtc,
-				     struct drm_framebuffer *fb,
-				     int x, int y);
 	void (*hpd_irq_setup)(struct drm_device *dev);
 	/* clock updates for mode set */
 	/* cursor updates */
@@ -726,6 +719,8 @@ struct intel_uncore {
 		i915_reg_t reg_post;
 		u32 val_reset;
 	} fw_domain[FW_DOMAIN_ID_COUNT];
+
+	int unclaimed_mmio_check;
 };
 
 /* Iterate over initialised fw domains */
@@ -889,6 +884,9 @@ struct intel_context {
 		struct drm_i915_gem_object *state;
 		struct intel_ringbuffer *ringbuf;
 		int pin_count;
+		struct i915_vma *lrc_vma;
+		u64 lrc_desc;
+		uint32_t *lrc_reg_state;
 	} engine[I915_NUM_RINGS];
 
 	struct list_head link;
@@ -1301,7 +1299,7 @@ struct i915_gem_mm {
 	bool busy;
 
 	/* the indicator for dispatch video commands on two BSD rings */
-	int bsd_ring_dispatch_index;
+	unsigned int bsd_ring_dispatch_index;
 
 	/** Bit 6 swizzling required for X tiling */
 	uint32_t bit_6_swizzle_x;
@@ -1487,7 +1485,7 @@ struct intel_vbt_data {
 		u8 seq_version;
 		u32 size;
 		u8 *data;
-		u8 *sequence[MIPI_SEQ_MAX];
+		const u8 *sequence[MIPI_SEQ_MAX];
 	} dsi;
 
 	int crt_ddc_pin;
@@ -1784,7 +1782,7 @@ struct drm_i915_private {
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 	unsigned int skl_boot_cdclk;
-	unsigned int cdclk_freq, max_cdclk_freq;
+	unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
 	unsigned int max_dotclk_freq;
 	unsigned int hpll_freq;
 	unsigned int czclk_freq;
@@ -1829,8 +1827,13 @@ struct drm_i915_private {
 	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
 #endif
 
+	/* dpll and cdclk state is protected by connection_mutex */
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+
+	unsigned int active_crtcs;
+	unsigned int min_pixclk[I915_MAX_PIPES];
+
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
 	struct i915_workarounds workarounds;
@@ -1945,6 +1948,8 @@ struct drm_i915_private {
 		void (*stop_ring)(struct intel_engine_cs *ring);
 	} gt;
 
+	struct intel_context *kernel_context;
+
 	bool edp_low_vswing;
 
 	/* perform PHY state sanity checks? */
@@ -2265,9 +2270,9 @@ struct drm_i915_gem_request {
 };
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx,
-			   struct drm_i915_gem_request **req_out);
+struct drm_i915_gem_request * __must_check
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct intel_context *ctx);
 void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@@ -2576,6 +2581,11 @@ struct drm_i915_cmd_table {
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
+
+/* WaRsDisableCoarsePowerGating:skl,bxt */
+#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
+						 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
+						  IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
@@ -2665,44 +2675,7 @@ extern int i915_max_ioctl;
 extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
-/* i915_params.c */
-struct i915_params {
-	int modeset;
-	int panel_ignore_lid;
-	int semaphores;
-	int lvds_channel_mode;
-	int panel_use_ssc;
-	int vbt_sdvo_panel_type;
-	int enable_rc6;
-	int enable_dc;
-	int enable_fbc;
-	int enable_ppgtt;
-	int enable_execlists;
-	int enable_psr;
-	unsigned int preliminary_hw_support;
-	int disable_power_well;
-	int enable_ips;
-	int invert_brightness;
-	int enable_cmd_parser;
-	/* leave bools at the end to not create holes */
-	bool enable_hangcheck;
-	bool fastboot;
-	bool prefault_disable;
-	bool load_detect_test;
-	bool reset;
-	bool disable_display;
-	bool disable_vtd_wa;
-	bool enable_guc_submission;
-	int guc_log_level;
-	int use_mmio_flip;
-	int mmio_debug;
-	bool verbose_state_checks;
-	bool nuclear_pageflip;
-	int edp_vswing;
-};
-extern struct i915_params i915 __read_mostly;
-
-				/* i915_dma.c */
+/* i915_dma.c */
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2745,7 +2718,8 @@ extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev,
 					bool restore_forcewake);
 extern void intel_uncore_init(struct drm_device *dev);
-extern void intel_uncore_check_errors(struct drm_device *dev);
+extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
+extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
 extern void intel_uncore_fini(struct drm_device *dev);
 extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
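
An aside on the WARN_ON()/WARN_ON_ONCE() rework above: the stringified condition is now passed through a "%s" argument instead of being spliced into the format string. A minimal userspace toy illustrating why that matters when the condition text itself contains a '%'; warn() here is a stand-in for the kernel's WARN() machinery, not the real thing.

#include <stdio.h>

#define __stringify(x) #x

#define warn(fmt, ...) printf(fmt "\n", ##__VA_ARGS__)

/*
 * Unsafe variant (commented out): a condition such as "val % 4" would
 * put a stray conversion specifier into the format string.
 *
 * #define WARN_ON(x) do { if (x) warn("WARN_ON(" #x ")"); } while (0)
 */

/* Safe: the condition text is only ever consumed as a %s argument. */
#define WARN_ON(x) \
	do { if (x) warn("%s", "WARN_ON(" __stringify(x) ")"); } while (0)

int main(void)
{
	int val = 5;

	WARN_ON(val % 4);	/* prints: WARN_ON(val % 4) */
	return 0;
}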

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ddc21d4b388d..371bbb28c471 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1251,7 +1251,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
-	s64 before, now;
+	s64 before = 0; /* Only to silence a compiler warning. */
 	int ret;
 
 	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
@@ -1271,14 +1271,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			return -ETIME;
 
 		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+
+		/*
+		 * Record current time in case interrupted by signal, or wedged.
+		 */
+		before = ktime_get_raw_ns();
 	}
 
 	if (INTEL_INFO(dev_priv)->gen >= 6)
 		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
 
-	/* Record current time in case interrupted by signal, or wedged */
 	trace_i915_gem_request_wait_begin(req);
-	before = ktime_get_raw_ns();
 
 	/* Optimistic spin for the next jiffie before touching IRQs */
 	ret = __i915_spin_request(req, state);
@@ -1343,11 +1346,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	finish_wait(&ring->irq_queue, &wait);
 
 out:
-	now = ktime_get_raw_ns();
 	trace_i915_gem_request_wait_end(req);
 
 	if (timeout) {
-		s64 tres = *timeout - (now - before);
+		s64 tres = *timeout - (ktime_get_raw_ns() - before);
 
 		*timeout = tres < 0 ? 0 : tres;
@@ -2677,10 +2679,8 @@ void i915_gem_request_free(struct kref *req_ref)
 	i915_gem_request_remove_from_client(req);
 
 	if (ctx) {
-		if (i915.enable_execlists) {
-			if (ctx != req->ring->default_context)
-				intel_lr_context_unpin(req);
-		}
+		if (i915.enable_execlists && ctx != req->i915->kernel_context)
+			intel_lr_context_unpin(req);
 
 		i915_gem_context_unreference(ctx);
 	}
@@ -2688,9 +2688,10 @@ void i915_gem_request_free(struct kref *req_ref)
 	kmem_cache_free(req->i915->requests, req);
 }
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx,
-			   struct drm_i915_gem_request **req_out)
+static inline int
+__i915_gem_request_alloc(struct intel_engine_cs *ring,
+			 struct intel_context *ctx,
+			 struct drm_i915_gem_request **req_out)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 	struct drm_i915_gem_request *req;
@@ -2753,6 +2754,31 @@ err:
 	return ret;
 }
 
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This can be NULL if the request is not directly related to
+ *       any specific user context, in which case this function will
+ *       choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct intel_context *ctx)
+{
+	struct drm_i915_gem_request *req;
+	int err;
+
+	if (ctx == NULL)
+		ctx = to_i915(engine->dev)->kernel_context;
+	err = __i915_gem_request_alloc(engine, ctx, &req);
+	return err ? ERR_PTR(err) : req;
+}
+
 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 {
 	intel_ring_reserved_space_cancel(req->ringbuf);
@@ -3170,9 +3196,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		return 0;
 
 	if (*to_req == NULL) {
-		ret = i915_gem_request_alloc(to, to->default_context, to_req);
-		if (ret)
-			return ret;
+		struct drm_i915_gem_request *req;
+
+		req = i915_gem_request_alloc(to, NULL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+
+		*to_req = req;
 	}
 
 	trace_i915_gem_ring_sync_to(*to_req, from, from_req);
@@ -3372,9 +3402,9 @@ int i915_gpu_idle(struct drm_device *dev)
 		if (!i915.enable_execlists) {
 			struct drm_i915_gem_request *req;
 
-			ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-			if (ret)
-				return ret;
+			req = i915_gem_request_alloc(ring, NULL);
+			if (IS_ERR(req))
+				return PTR_ERR(req);
 
 			ret = i915_switch_context(req);
 			if (ret) {
@@ -4328,10 +4358,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto unref;
 
-	BUILD_BUG_ON(I915_NUM_RINGS > 16);
-	args->busy = obj->active << 16;
-	if (obj->last_write_req)
-		args->busy |= obj->last_write_req->ring->id;
+	args->busy = 0;
+	if (obj->active) {
+		int i;
+
+		for (i = 0; i < I915_NUM_RINGS; i++) {
+			struct drm_i915_gem_request *req;
+
+			req = obj->last_read_req[i];
+			if (req)
+				args->busy |= 1 << (16 + req->ring->exec_id);
+		}
+		if (obj->last_write_req)
+			args->busy |= obj->last_write_req->ring->exec_id;
+	}
 
 unref:
 	drm_gem_object_unreference(&obj->base);
@@ -4832,7 +4872,7 @@ i915_gem_init_hw(struct drm_device *dev)
 	 */
 	init_unused_rings(dev);
 
-	BUG_ON(!dev_priv->ring[RCS].default_context);
+	BUG_ON(!dev_priv->kernel_context);
 
 	ret = i915_ppgtt_init_hw(dev);
 	if (ret) {
@@ -4869,10 +4909,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i) {
 		struct drm_i915_gem_request *req;
 
-		WARN_ON(!ring->default_context);
-
-		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-		if (ret) {
+		req = i915_gem_request_alloc(ring, NULL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
 			i915_gem_cleanup_ringbuffer(dev);
 			goto out;
 		}
@@ -5112,6 +5151,8 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
 	spin_lock_init(&file_priv->mm.lock);
 	INIT_LIST_HEAD(&file_priv->mm.request_list);
 
+	file_priv->bsd_ring = -1;
+
 	ret = i915_gem_context_open(dev, file);
 	if (ret)
 		kfree(file_priv);
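
The hunks above convert i915_gem_request_alloc() from an int return plus out-parameter to the ERR_PTR convention, where the errno travels inside the pointer itself. A self-contained sketch of that idiom, with just enough of <linux/err.h> re-implemented to run in userspace; the request type and allocator are stand-ins, not the driver's.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct request { int id; };

static struct request *request_alloc(int fail)
{
	struct request *req;

	if (fail)
		return ERR_PTR(-ENOMEM);	/* error rides in the pointer */

	req = malloc(sizeof(*req));
	if (!req)
		return ERR_PTR(-ENOMEM);
	req->id = 1;
	return req;
}

int main(void)
{
	struct request *req = request_alloc(0);

	if (IS_ERR(req)) {
		fprintf(stderr, "alloc failed: %ld\n", PTR_ERR(req));
		return 1;
	}
	printf("got request %d\n", req->id);
	free(req);
	return 0;
}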

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c25083c78ba7..6a4f64b03db6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -347,22 +347,20 @@ void i915_gem_context_reset(struct drm_device *dev)
 			i915_gem_context_unreference(lctx);
 			ring->last_context = NULL;
 		}
-
-		/* Force the GPU state to be reinitialised on enabling */
-		if (ring->default_context)
-			ring->default_context->legacy_hw_ctx.initialized = false;
 	}
+
+	/* Force the GPU state to be reinitialised on enabling */
+	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
 }
 
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_context *ctx;
-	int i;
 
 	/* Init should only be called once per module load. Eventually the
 	 * restriction on the context_disabled check can be loosened. */
-	if (WARN_ON(dev_priv->ring[RCS].default_context))
+	if (WARN_ON(dev_priv->kernel_context))
 		return 0;
 
 	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
@@ -392,12 +390,7 @@ int i915_gem_context_init(struct drm_device *dev)
 		return PTR_ERR(ctx);
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
-
-		/* NB: RCS will hold a ref for all rings */
-		ring->default_context = ctx;
-	}
+	dev_priv->kernel_context = ctx;
 
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
 			i915.enable_execlists ? "LR" :
@@ -408,7 +401,7 @@ int i915_gem_context_init(struct drm_device *dev)
 void i915_gem_context_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
+	struct intel_context *dctx = dev_priv->kernel_context;
 	int i;
 
 	if (dctx->legacy_hw_ctx.rcs_state) {
@@ -435,17 +428,17 @@ void i915_gem_context_fini(struct drm_device *dev)
 		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for (i = I915_NUM_RINGS; --i >= 0;) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
 
-		if (ring->last_context)
+		if (ring->last_context) {
 			i915_gem_context_unreference(ring->last_context);
-
-		ring->default_context = NULL;
-		ring->last_context = NULL;
+			ring->last_context = NULL;
+		}
 	}
 
 	i915_gem_context_unreference(dctx);
+	dev_priv->kernel_context = NULL;
 }
 
 int i915_gem_context_enable(struct drm_i915_gem_request *req)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dccb517361b3..2dc08ce1079a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -193,13 +193,10 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 		return eb->lut[handle];
 	} else {
 		struct hlist_head *head;
-		struct hlist_node *node;
+		struct i915_vma *vma;
 
 		head = &eb->buckets[handle & eb->and];
-		hlist_for_each(node, head) {
-			struct i915_vma *vma;
-
-			vma = hlist_entry(node, struct i915_vma, exec_node);
+		hlist_for_each_entry(vma, head, exec_node) {
 			if (vma->exec_handle == handle)
 				return vma;
 		}
@@ -1309,6 +1306,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	exec_start = params->batch_obj_vm_offset +
 		     params->args_batch_start_offset;
 
+	if (exec_len == 0)
+		exec_len = params->batch_obj->base.size;
+
 	ret = ring->dispatch_execbuffer(params->request,
 					exec_start, exec_len,
 					params->dispatch_flags);
@@ -1325,33 +1325,23 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 /**
  * Find one BSD ring to dispatch the corresponding BSD command.
- * The Ring ID is returned.
+ * The ring index is returned.
  */
-static int gen8_dispatch_bsd_ring(struct drm_device *dev,
-				  struct drm_file *file)
+static unsigned int
+gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	/* Check whether the file_priv is using one ring */
-	if (file_priv->bsd_ring)
-		return file_priv->bsd_ring->id;
-	else {
-		/* If no, use the ping-pong mechanism to select one ring */
-		int ring_id;
-
-		mutex_lock(&dev->struct_mutex);
-		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
-			ring_id = VCS;
-			dev_priv->mm.bsd_ring_dispatch_index = 1;
-		} else {
-			ring_id = VCS2;
-			dev_priv->mm.bsd_ring_dispatch_index = 0;
-		}
-		file_priv->bsd_ring = &dev_priv->ring[ring_id];
-		mutex_unlock(&dev->struct_mutex);
-		return ring_id;
+	/* Check whether the file_priv has already selected one ring. */
+	if ((int)file_priv->bsd_ring < 0) {
+		/* If not, use the ping-pong mechanism to select one. */
+		mutex_lock(&dev_priv->dev->struct_mutex);
+		file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
+		dev_priv->mm.bsd_ring_dispatch_index ^= 1;
+		mutex_unlock(&dev_priv->dev->struct_mutex);
 	}
+
+	return file_priv->bsd_ring;
 }
 
 static struct drm_i915_gem_object *
@@ -1374,6 +1364,63 @@ eb_get_batch(struct eb_vmas *eb)
 	return vma->obj;
 }
 
+#define I915_USER_RINGS (4)
+
+static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+	[I915_EXEC_DEFAULT]	= RCS,
+	[I915_EXEC_RENDER]	= RCS,
+	[I915_EXEC_BLT]		= BCS,
+	[I915_EXEC_BSD]		= VCS,
+	[I915_EXEC_VEBOX]	= VECS
+};
+
+static int
+eb_select_ring(struct drm_i915_private *dev_priv,
+	       struct drm_file *file,
+	       struct drm_i915_gem_execbuffer2 *args,
+	       struct intel_engine_cs **ring)
+{
+	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
+
+	if (user_ring_id > I915_USER_RINGS) {
+		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+		return -EINVAL;
+	}
+
+	if ((user_ring_id != I915_EXEC_BSD) &&
+	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+		DRM_DEBUG("execbuf with non bsd ring but with invalid "
+			  "bsd dispatch flags: %d\n", (int)(args->flags));
+		return -EINVAL;
+	}
+
+	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
+		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
+
+		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
+			bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
+		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
+			   bsd_idx <= I915_EXEC_BSD_RING2) {
+			bsd_idx--;
+		} else {
+			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
+				  bsd_idx);
+			return -EINVAL;
+		}
+
+		*ring = &dev_priv->ring[_VCS(bsd_idx)];
+	} else {
+		*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
+	}
+
+	if (!intel_ring_initialized(*ring)) {
+		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file,
@@ -1381,6 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_i915_gem_exec_object2 *exec)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *req = NULL;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
@@ -1411,51 +1459,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->flags & I915_EXEC_IS_PINNED)
 		dispatch_flags |= I915_DISPATCH_PINNED;
 
-	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
-		DRM_DEBUG("execbuf with unknown ring: %d\n",
-			  (int)(args->flags & I915_EXEC_RING_MASK));
-		return -EINVAL;
-	}
-
-	if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
-	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
-		DRM_DEBUG("execbuf with non bsd ring but with invalid "
-			  "bsd dispatch flags: %d\n", (int)(args->flags));
-		return -EINVAL;
-	}
-
-	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
-		ring = &dev_priv->ring[RCS];
-	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
-		if (HAS_BSD2(dev)) {
-			int ring_id;
-
-			switch (args->flags & I915_EXEC_BSD_MASK) {
-			case I915_EXEC_BSD_DEFAULT:
-				ring_id = gen8_dispatch_bsd_ring(dev, file);
-				ring = &dev_priv->ring[ring_id];
-				break;
-			case I915_EXEC_BSD_RING1:
-				ring = &dev_priv->ring[VCS];
-				break;
-			case I915_EXEC_BSD_RING2:
-				ring = &dev_priv->ring[VCS2];
-				break;
-			default:
-				DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
-					  (int)(args->flags & I915_EXEC_BSD_MASK));
-				return -EINVAL;
-			}
-		} else
-			ring = &dev_priv->ring[VCS];
-	} else
-		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
-
-	if (!intel_ring_initialized(ring)) {
-		DRM_DEBUG("execbuf with invalid ring: %d\n",
-			  (int)(args->flags & I915_EXEC_RING_MASK));
-		return -EINVAL;
-	}
+	ret = eb_select_ring(dev_priv, file, args, &ring);
+	if (ret)
+		return ret;
 
 	if (args->buffer_count < 1) {
 		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
@@ -1602,11 +1608,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	ret = i915_gem_request_alloc(ring, ctx, &params->request);
-	if (ret)
+	req = i915_gem_request_alloc(ring, ctx);
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
 		goto err_batch_unpin;
+	}
 
-	ret = i915_gem_request_add_to_client(params->request, file);
+	ret = i915_gem_request_add_to_client(req, file);
 	if (ret)
 		goto err_batch_unpin;
@@ -1622,6 +1630,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->dispatch_flags          = dispatch_flags;
 	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
+	params->request                 = req;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
@@ -1645,8 +1654,8 @@ err:
 	 * must be freed again. If it was submitted then it is being tracked
 	 * on the active request list and no clean up is required here.
 	 */
-	if (ret && params->request)
-		i915_gem_request_cancel(params->request);
+	if (ret && req)
+		i915_gem_request_cancel(req);
 
 	mutex_unlock(&dev->struct_mutex);

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 56f4f2e58d53..7377b6725c33 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -96,9 +96,11 @@
 
 static int i915_get_ggtt_vma_pages(struct i915_vma *vma);
 
-const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_normal = {
+	.type = I915_GGTT_VIEW_NORMAL,
+};
 const struct i915_ggtt_view i915_ggtt_view_rotated = {
-	.type = I915_GGTT_VIEW_ROTATED
+	.type = I915_GGTT_VIEW_ROTATED,
 };
 
 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
@@ -3329,7 +3331,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 }
 
 static struct scatterlist *
-rotate_pages(dma_addr_t *in, unsigned int offset,
+rotate_pages(const dma_addr_t *in, unsigned int offset,
 	     unsigned int width, unsigned int height,
 	     struct sg_table *st, struct scatterlist *sg)
 {
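
An aside on the i915_ggtt_view_normal hunk: the old declaration relied on static zero-initialisation, which was only correct while I915_GGTT_VIEW_NORMAL happened to be 0. Spelling the type out, as the rotated view already did, survives any reordering of the enum. A tiny illustration with stand-in names:

#include <stdio.h>

enum view_type { VIEW_NORMAL = 0, VIEW_ROTATED };

struct view { enum view_type type; };

/* Fragile: correct only while VIEW_NORMAL happens to be 0. */
static const struct view view_normal_implicit;

/* Robust: survives reordering of the enum, as in the hunk above. */
static const struct view view_normal_explicit = {
	.type = VIEW_NORMAL,
};

int main(void)
{
	printf("%d %d\n", view_normal_implicit.type,
	       view_normal_explicit.type);
	return 0;
}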

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b448ad832dcf..e5737963ab79 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -44,7 +44,6 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
 
 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
 #define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index f7df54a8ee2b..16da9c1422cc 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -47,6 +47,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	int count = 0;
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (drm_mm_node_allocated(&vma->node))
+			count++;
+		if (vma->pin_count)
+			count++;
+	}
+
+	return count;
+}
+
+static bool swap_available(void)
+{
+	return get_nr_swap_pages() > 0;
+}
+
+static bool can_release_pages(struct drm_i915_gem_object *obj)
+{
+	/* Only report true if by unbinding the object and putting its pages
+	 * we can actually make forward progress towards freeing physical
+	 * pages.
+	 *
+	 * If the pages are pinned for any other reason than being bound
+	 * to the GPU, simply unbinding from the GPU is not going to succeed
+	 * in releasing our pin count on the pages themselves.
+	 */
+	if (obj->pages_pin_count != num_vma_bound(obj))
+		return false;
+
+	/* We can only return physical pages to the system if we can either
+	 * discard the contents (because the user has marked them as being
+	 * purgeable) or if we can move their contents out to swap.
+	 */
+	return swap_available() || obj->madv == I915_MADV_DONTNEED;
+}
+
 /**
  * i915_gem_shrink - Shrink buffer object caches
  * @dev_priv: i915 device
@@ -129,6 +169,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
 				continue;
 
+			if (!can_release_pages(obj))
+				continue;
+
 			drm_gem_object_reference(&obj->base);
 
 			/* For the unbound phase, this should be a no-op! */
@@ -188,21 +231,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 	return true;
 }
 
-static int num_vma_bound(struct drm_i915_gem_object *obj)
-{
-	struct i915_vma *vma;
-	int count = 0;
-
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (drm_mm_node_allocated(&vma->node))
-			count++;
-		if (vma->pin_count)
-			count++;
-	}
-
-	return count;
-}
-
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
@@ -222,7 +250,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 			count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
+		if (!obj->active && can_release_pages(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 	}
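
A toy model of the can_release_pages() predicate added above, reduced to the fields it actually consults; the structure and helpers here are stand-ins, not the driver's types.

#include <stdbool.h>
#include <stdio.h>

struct object {
	int pages_pin_count;	/* total pins on the backing pages */
	int vma_bound_count;	/* pins that unbinding would drop  */
	bool discardable;	/* userspace marked it purgeable   */
};

static bool swap_available(void) { return false; /* say swap is off */ }

static bool can_release_pages(const struct object *obj)
{
	/* Pinned for some reason other than a GPU binding? Hands off. */
	if (obj->pages_pin_count != obj->vma_bound_count)
		return false;

	/* Contents must have somewhere to go, or be discardable. */
	return swap_available() || obj->discardable;
}

int main(void)
{
	struct object purgeable = { 1, 1, true };
	struct object pinned = { 2, 1, true };

	printf("purgeable: %d, pinned elsewhere: %d\n",
	       can_release_pages(&purgeable), can_release_pages(&pinned));
	return 0;
}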

diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 3476877fc0d6..c384dc9c8a63 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -569,6 +569,9 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
 	if (obj->pages == NULL)
 		goto cleanup;
 
+	obj->get_page.sg = obj->pages->sgl;
+	obj->get_page.last = 0;
+
 	i915_gem_object_pin_pages(obj);
 	obj->stolen = stolen;

diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 06ca4082735b..7eeb24427785 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1050,7 +1050,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 			if (request)
 				rbuf = request->ctx->engine[ring->id].ringbuf;
 			else
-				rbuf = ring->default_context->engine[ring->id].ringbuf;
+				rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
 		} else
 			rbuf = ring->buffer;

diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index 685c7991e24f..e4ba5822289b 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -40,6 +40,7 @@
 #define   GS_MIA_CORE_STATE		  (1 << GS_MIA_SHIFT)
 
 #define SOFT_SCRATCH(n)			_MMIO(0xc180 + (n) * 4)
+#define SOFT_SCRATCH_COUNT		16
 
 #define UOS_RSA_SCRATCH(i)		_MMIO(0xc200 + (i) * 4)
 #define   UOS_RSA_SCRATCH_MAX_COUNT	  64

diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 05aa7e61cbe0..51ae5c1f806d 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -158,10 +158,8 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
 	data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
 	/* WaRsDisableCoarsePowerGating:skl,bxt */
-	if (!intel_enable_rc6(dev_priv->dev) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
-	    (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
-	    (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
+	if (!intel_enable_rc6(dev) ||
+	    NEEDS_WaRsDisableCoarsePowerGating(dev))
 		data[1] = 0;
 	else
 		/* bit 0 and 1 are for Render and Media domain separately */
@@ -246,6 +244,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 		db_exc.cookie = 1;
 	}
 
+	/* Finally, update the cached copy of the GuC's WQ head */
+	gc->wq_head = desc->head;
+
 	kunmap_atomic(base);
 	return ret;
 }
@@ -471,28 +472,30 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
 			     sizeof(desc) * client->ctx_index);
 }
 
-/* Get valid workqueue item and return it back to offset */
-static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
+int i915_guc_wq_check_space(struct i915_guc_client *gc)
 {
 	struct guc_process_desc *desc;
 	void *base;
 	u32 size = sizeof(struct guc_wq_item);
 	int ret = -ETIMEDOUT, timeout_counter = 200;
 
+	if (!gc)
+		return 0;
+
+	/* Quickly return if wq space is available since last time we cache the
+	 * head position. */
+	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
+		return 0;
+
 	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
 	desc = base + gc->proc_desc_offset;
 
 	while (timeout_counter-- > 0) {
-		if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
-			*offset = gc->wq_tail;
-
-			/* advance the tail for next workqueue item */
-			gc->wq_tail += size;
-			gc->wq_tail &= gc->wq_size - 1;
+		gc->wq_head = desc->head;
 
-			/* this will break the loop */
-			timeout_counter = 0;
+		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
 			ret = 0;
+			break;
 		}
 
 		if (timeout_counter)
@@ -510,12 +513,16 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
 	enum intel_ring_id ring_id = rq->ring->id;
 	struct guc_wq_item *wqi;
 	void *base;
-	u32 tail, wq_len, wq_off = 0;
-	int ret;
+	u32 tail, wq_len, wq_off, space;
+
+	space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+	if (WARN_ON(space < sizeof(struct guc_wq_item)))
+		return -ENOSPC; /* shouldn't happen */
 
-	ret = guc_get_workqueue_space(gc, &wq_off);
-	if (ret)
-		return ret;
+	/* postincrement WQ tail for next time */
+	wq_off = gc->wq_tail;
+	gc->wq_tail += sizeof(struct guc_wq_item);
+	gc->wq_tail &= gc->wq_size - 1;
 
 	/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
 	 * should not have the case where structure wqi is across page, neither
@@ -832,6 +839,96 @@ static void guc_create_log(struct intel_guc *guc)
 	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
 
+static void init_guc_policies(struct guc_policies *policies)
+{
+	struct guc_policy *policy;
+	u32 p, i;
+
+	policies->dpc_promote_time = 500000;
+	policies->max_num_work_items = POLICY_MAX_NUM_WI;
+
+	for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
+		for (i = 0; i < I915_NUM_RINGS; i++) {
+			policy = &policies->policy[p][i];
+
+			policy->execution_quantum = 1000000;
+			policy->preemption_time = 500000;
+			policy->fault_time = 250000;
+			policy->policy_flags = 0;
+		}
+	}
+
+	policies->is_valid = 1;
+}
+
+static void guc_create_ads(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_gem_object *obj;
+	struct guc_ads *ads;
+	struct guc_policies *policies;
+	struct guc_mmio_reg_state *reg_state;
+	struct intel_engine_cs *ring;
+	struct page *page;
+	u32 size, i;
+
+	/* The ads obj includes the struct itself and buffers passed to GuC */
+	size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
+			sizeof(struct guc_mmio_reg_state) +
+			GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
+
+	obj = guc->ads_obj;
+	if (!obj) {
+		obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
+		if (!obj)
+			return;
+
+		guc->ads_obj = obj;
+	}
+
+	page = i915_gem_object_get_page(obj, 0);
+	ads = kmap(page);
+
+	/*
+	 * The GuC requires a "Golden Context" when it reinitialises
+	 * engines after a reset. Here we use the Render ring default
+	 * context, which must already exist and be pinned in the GGTT,
+	 * so its address won't change after we've told the GuC where
+	 * to find it.
+	 */
+	ring = &dev_priv->ring[RCS];
+	ads->golden_context_lrca = ring->status_page.gfx_addr;
+
+	for_each_ring(ring, dev_priv, i)
+		ads->eng_state_size[i] = intel_lr_context_size(ring);
+
+	/* GuC scheduling policies */
+	policies = (void *)ads + sizeof(struct guc_ads);
+	init_guc_policies(policies);
+
+	ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
+			sizeof(struct guc_ads);
+
+	/* MMIO reg state */
+	reg_state = (void *)policies + sizeof(struct guc_policies);
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		reg_state->mmio_white_list[i].mmio_start =
+			dev_priv->ring[i].mmio_base + GUC_MMIO_WHITE_LIST_START;
+
+		/* Nothing to be saved or restored for now. */
+		reg_state->mmio_white_list[i].count = 0;
+	}
+
+	ads->reg_state_addr = ads->scheduler_policies +
+			sizeof(struct guc_policies);
+
+	ads->reg_state_buffer = ads->reg_state_addr +
+			sizeof(struct guc_mmio_reg_state);
+
+	kunmap(page);
+}
+
 /*
  * Set up the memory resources to be shared with the GuC. At this point,
  * we require just one object that can be mapped through the GGTT.
 */
@@ -858,6 +955,8 @@ int i915_guc_submission_init(struct drm_device *dev)
 
 	guc_create_log(guc);
 
+	guc_create_ads(guc);
+
 	return 0;
 }
@@ -865,7 +964,7 @@ int i915_guc_submission_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
-	struct intel_context *ctx = dev_priv->ring[RCS].default_context;
+	struct intel_context *ctx = dev_priv->kernel_context;
 	struct i915_guc_client *client;
 
 	/* client for execbuf submission */
@@ -896,6 +995,9 @@ void i915_guc_submission_fini(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
 
+	gem_release_guc_obj(dev_priv->guc.ads_obj);
+	guc->ads_obj = NULL;
+
 	gem_release_guc_obj(dev_priv->guc.log_obj);
 	guc->log_obj = NULL;
@@ -919,7 +1021,7 @@ int intel_guc_suspend(struct drm_device *dev)
 	if (!i915.enable_guc_submission)
 		return 0;
 
-	ctx = dev_priv->ring[RCS].default_context;
+	ctx = dev_priv->kernel_context;
 
 	data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
 	/* any value greater than GUC_POWER_D0 */
@@ -945,7 +1047,7 @@ int intel_guc_resume(struct drm_device *dev)
 	if (!i915.enable_guc_submission)
 		return 0;
 
-	ctx = dev_priv->ring[RCS].default_context;
+	ctx = dev_priv->kernel_context;
 
 	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
 	data[1] = GUC_POWER_D0;
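
A reduced model of the reworked work-queue space check above: the head lives in memory shared with the GuC and is costly to re-read (a kmap in the driver), so a cached copy is consulted first and only refreshed when the cached view says the queue is full. CIRC_SPACE is copied from <linux/circ_buf.h>; the sizes and the read_shared_head() helper are stand-ins.

#include <stdio.h>

#define CIRC_SPACE(head, tail, size) \
	(((tail) - ((head) + 1)) & ((size) - 1))

#define WQ_SIZE		64	/* bytes, power of two */
#define WQ_ITEM_SIZE	16

static unsigned int shared_head;	/* owned by the consumer (GuC) */

static unsigned int read_shared_head(void)
{
	/* In the driver this is the expensive kmap + desc->head read. */
	return shared_head;
}

struct client {
	unsigned int wq_tail;
	unsigned int wq_head;	/* cached copy of the consumer head */
};

static int wq_check_space(struct client *c)
{
	if (CIRC_SPACE(c->wq_tail, c->wq_head, WQ_SIZE) >= WQ_ITEM_SIZE)
		return 0;	/* fast path: cached head suffices */

	c->wq_head = read_shared_head();	/* slow path: refresh */
	if (CIRC_SPACE(c->wq_tail, c->wq_head, WQ_SIZE) >= WQ_ITEM_SIZE)
		return 0;
	return -1;	/* still full; the driver retries with a delay */
}

int main(void)
{
	struct client c = { .wq_tail = 48, .wq_head = 0 };

	shared_head = 32;	/* the consumer has made progress */
	printf("space check: %d\n", wq_check_space(&c));
	return 0;
}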

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fa8afa7860ae..25a89373df63 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2188,10 +2188,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
 	disable_rpm_wakeref_asserts(dev_priv);
 
-	/* We get interrupts on unclaimed registers, so check for this before we
-	 * do any I915_{READ,WRITE}. */
-	intel_uncore_check_errors(dev);
-
 	/* disable master interrupt before clearing iir  */
 	de_ier = I915_READ(DEIER);
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -2268,43 +2264,20 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
 }
 
-static irqreturn_t gen8_irq_handler(int irq, void *arg)
+static irqreturn_t
+gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 {
-	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 master_ctl;
+	struct drm_device *dev = dev_priv->dev;
 	irqreturn_t ret = IRQ_NONE;
-	uint32_t tmp = 0;
+	u32 iir;
 	enum pipe pipe;
-	u32 aux_mask = GEN8_AUX_CHANNEL_A;
-
-	if (!intel_irqs_enabled(dev_priv))
-		return IRQ_NONE;
-
-	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
-	disable_rpm_wakeref_asserts(dev_priv);
-
-	if (INTEL_INFO(dev_priv)->gen >= 9)
-		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
-			GEN9_AUX_CHANNEL_D;
-
-	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
-	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
-	if (!master_ctl)
-		goto out;
-
-	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
-
-	/* Find, clear, then process each source of interrupt */
-
-	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
 
 	if (master_ctl & GEN8_DE_MISC_IRQ) {
-		tmp = I915_READ(GEN8_DE_MISC_IIR);
-		if (tmp) {
-			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
+		iir = I915_READ(GEN8_DE_MISC_IIR);
+		if (iir) {
+			I915_WRITE(GEN8_DE_MISC_IIR, iir);
 			ret = IRQ_HANDLED;
-			if (tmp & GEN8_DE_MISC_GSE)
+			if (iir & GEN8_DE_MISC_GSE)
 				intel_opregion_asle_intr(dev);
 			else
 				DRM_ERROR("Unexpected DE Misc interrupt\n");
@@ -2314,33 +2287,40 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 	}
 
 	if (master_ctl & GEN8_DE_PORT_IRQ) {
-		tmp = I915_READ(GEN8_DE_PORT_IIR);
-		if (tmp) {
+		iir = I915_READ(GEN8_DE_PORT_IIR);
+		if (iir) {
+			u32 tmp_mask;
 			bool found = false;
-			u32 hotplug_trigger = 0;
-
-			if (IS_BROXTON(dev_priv))
-				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
-			else if (IS_BROADWELL(dev_priv))
-				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
 
-			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
+			I915_WRITE(GEN8_DE_PORT_IIR, iir);
 			ret = IRQ_HANDLED;
 
-			if (tmp & aux_mask) {
+			tmp_mask = GEN8_AUX_CHANNEL_A;
+			if (INTEL_INFO(dev_priv)->gen >= 9)
+				tmp_mask |= GEN9_AUX_CHANNEL_B |
+					    GEN9_AUX_CHANNEL_C |
+					    GEN9_AUX_CHANNEL_D;
+
+			if (iir & tmp_mask) {
 				dp_aux_irq_handler(dev);
 				found = true;
 			}
 
-			if (hotplug_trigger) {
-				if (IS_BROXTON(dev))
-					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
-				else
-					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
-				found = true;
+			if (IS_BROXTON(dev_priv)) {
+				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
+				if (tmp_mask) {
+					bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
+					found = true;
+				}
+			} else if (IS_BROADWELL(dev_priv)) {
+				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
+				if (tmp_mask) {
+					ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
+					found = true;
+				}
 			}
 
-			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
+			if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
 				gmbus_irq_handler(dev);
 				found = true;
 			}
@@ -2353,49 +2333,51 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 	}
 
 	for_each_pipe(dev_priv, pipe) {
-		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
+		u32 flip_done, fault_errors;
 
 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
 			continue;
 
-		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
-		if (pipe_iir) {
-			ret = IRQ_HANDLED;
-			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+		if (!iir) {
+			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+			continue;
+		}
 
-			if (pipe_iir & GEN8_PIPE_VBLANK &&
-			    intel_pipe_handle_vblank(dev, pipe))
-				intel_check_page_flip(dev, pipe);
+		ret = IRQ_HANDLED;
+		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
 
-			if (INTEL_INFO(dev_priv)->gen >= 9)
-				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
-			else
-				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
+		if (iir & GEN8_PIPE_VBLANK &&
+		    intel_pipe_handle_vblank(dev, pipe))
+			intel_check_page_flip(dev, pipe);
 
-			if (flip_done) {
-				intel_prepare_page_flip(dev, pipe);
-				intel_finish_page_flip_plane(dev, pipe);
-			}
+		flip_done = iir;
+		if (INTEL_INFO(dev_priv)->gen >= 9)
+			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
+		else
+			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
 
-			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
-				hsw_pipe_crc_irq_handler(dev, pipe);
+		if (flip_done) {
+			intel_prepare_page_flip(dev, pipe);
+			intel_finish_page_flip_plane(dev, pipe);
+		}
 
-			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
-				intel_cpu_fifo_underrun_irq_handler(dev_priv,
-								    pipe);
+		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
+			hsw_pipe_crc_irq_handler(dev, pipe);
 
-			if (INTEL_INFO(dev_priv)->gen >= 9)
-				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
-			else
-				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
+			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
-			if (fault_errors)
-				DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
-					  pipe_name(pipe),
-					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
-		} else
-			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+		fault_errors = iir;
+		if (INTEL_INFO(dev_priv)->gen >= 9)
+			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+		else
+			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+		if (fault_errors)
+			DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+				  pipe_name(pipe),
+				  fault_errors);
 	}
 
 	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
@@ -2405,15 +2387,15 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 		 * scheme also closed the SDE interrupt handling race we've seen
 		 * on older pch-split platforms. But this needs testing.
 		 */
-		u32 pch_iir = I915_READ(SDEIIR);
-		if (pch_iir) {
-			I915_WRITE(SDEIIR, pch_iir);
+		iir = I915_READ(SDEIIR);
+		if (iir) {
+			I915_WRITE(SDEIIR, iir);
 			ret = IRQ_HANDLED;
 
 			if (HAS_PCH_SPT(dev_priv))
-				spt_irq_handler(dev, pch_iir);
+				spt_irq_handler(dev, iir);
 			else
-				cpt_irq_handler(dev, pch_iir);
+				cpt_irq_handler(dev, iir);
 		} else {
 			/*
 			 * Like on previous PCH there seems to be something
@@ -2423,10 +2405,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 		}
 	}
 
+	return ret;
+}
+
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 master_ctl;
+	irqreturn_t ret;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
+	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
+	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+	if (!master_ctl)
+		return IRQ_NONE;
+
+	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
+
+	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
+	disable_rpm_wakeref_asserts(dev_priv);
+
+	/* Find, clear, then process each source of interrupt */
+	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
+	ret |= gen8_de_irq_handler(dev_priv, master_ctl);
+
 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
 	POSTING_READ_FW(GEN8_MASTER_IRQ);
 
-out:
 	enable_rpm_wakeref_asserts(dev_priv);
 
 	return ret;
@@ -2949,14 +2957,44 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 		ring->hangcheck.deadlock = 0;
 }
 
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+static bool subunits_stuck(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 tmp;
+	u32 instdone[I915_NUM_INSTDONE_REG];
+	bool stuck;
+	int i;
+
+	if (ring->id != RCS)
+		return true;
+
+	i915_get_extra_instdone(ring->dev, instdone);
 
+	/* There might be unstable subunit states even when
+	 * actual head is not moving. Filter out the unstable ones by
+	 * accumulating the undone -> done transitions and only
+	 * consider those as progress.
+	 */
+	stuck = true;
+	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
+		const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
+
+		if (tmp != ring->hangcheck.instdone[i])
+			stuck = false;
+
+		ring->hangcheck.instdone[i] |= tmp;
+	}
+
+	return stuck;
+}
+
+static enum intel_ring_hangcheck_action
+head_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
 	if (acthd != ring->hangcheck.acthd) {
+
+		/* Clear subunit states on head movement */
+		memset(ring->hangcheck.instdone, 0,
+		       sizeof(ring->hangcheck.instdone));
+
 		if (acthd > ring->hangcheck.max_acthd) {
 			ring->hangcheck.max_acthd = acthd;
 			return HANGCHECK_ACTIVE;
@@ -2965,6 +3003,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
 		return HANGCHECK_ACTIVE_LOOP;
 	}
 
+	if (!subunits_stuck(ring))
+		return HANGCHECK_ACTIVE;
+
+	return HANGCHECK_HUNG;
+}
+
+static enum intel_ring_hangcheck_action
+ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_ring_hangcheck_action ha;
+	u32 tmp;
+
+	ha = head_stuck(ring, acthd);
+	if (ha != HANGCHECK_HUNG)
+		return ha;
+
 	if (IS_GEN2(dev))
 		return HANGCHECK_HUNG;
@@ -3032,6 +3088,12 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	 */
 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 
+	/* As enabling the GPU requires fairly extensive mmio access,
+	 * periodically arm the mmio checker to see if we are triggering
+	 * any invalid access.
+	 */
+	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
 	for_each_ring(ring, dev_priv, i) {
 		u64 acthd;
 		u32 seqno;
@@ -3106,7 +3168,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 			if (ring->hangcheck.score > 0)
 				ring->hangcheck.score--;
 
+			/* Clear head and subunit states on seqno movement */
 			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
+
+			memset(ring->hangcheck.instdone, 0,
+			       sizeof(ring->hangcheck.instdone));
 		}
 
 		ring->hangcheck.seqno = seqno;
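
A reduced model of the subunits_stuck() accumulation introduced above: INSTDONE samples can fluctuate even while the head is parked, so progress is detected by folding each sample into a sticky mask and only declaring the engine stuck when a sample adds no new bits. Two registers and made-up samples stand in for the real I915_NUM_INSTDONE_REG reads.

#include <stdbool.h>
#include <stdio.h>

#define NUM_INSTDONE_REG 2

static unsigned int accu[NUM_INSTDONE_REG];	/* sticky accumulator */

static bool subunits_stuck(const unsigned int *sample)
{
	bool stuck = true;
	int i;

	for (i = 0; i < NUM_INSTDONE_REG; i++) {
		unsigned int tmp = sample[i] | accu[i];

		if (tmp != accu[i])
			stuck = false;	/* a transition we hadn't seen */

		accu[i] |= tmp;
	}
	return stuck;
}

int main(void)
{
	unsigned int first[]  = { 0x1, 0x0 };
	unsigned int second[] = { 0x1, 0x4 };	/* new bit: progress */
	unsigned int third[]  = { 0x1, 0x4 };	/* nothing new: stuck */

	printf("%d %d %d\n", subunits_stuck(first),
	       subunits_stuck(second), subunits_stuck(third));
	return 0;
}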

diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 835d6099c769..8d90c256520a 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -22,6 +22,7 @@
  * IN THE SOFTWARE.
  */
 
+#include "i915_params.h"
 #include "i915_drv.h"
 
 struct i915_params i915 __read_mostly = {

diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
new file mode 100644
index 000000000000..529929073120
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_PARAMS_H_
+#define _I915_PARAMS_H_
+
+#include <linux/cache.h> /* for __read_mostly */
+
+struct i915_params {
+	int modeset;
+	int panel_ignore_lid;
+	int semaphores;
+	int lvds_channel_mode;
+	int panel_use_ssc;
+	int vbt_sdvo_panel_type;
+	int enable_rc6;
+	int enable_dc;
+	int enable_fbc;
+	int enable_ppgtt;
+	int enable_execlists;
+	int enable_psr;
+	unsigned int preliminary_hw_support;
+	int disable_power_well;
+	int enable_ips;
+	int invert_brightness;
+	int enable_cmd_parser;
+	int guc_log_level;
+	int use_mmio_flip;
+	int mmio_debug;
+	int edp_vswing;
+	/* leave bools at the end to not create holes */
+	bool enable_hangcheck;
+	bool fastboot;
+	bool prefault_disable;
+	bool load_detect_test;
+	bool reset;
+	bool disable_display;
+	bool disable_vtd_wa;
+	bool enable_guc_submission;
+	bool verbose_state_checks;
+	bool nuclear_pageflip;
+};
+
+extern struct i915_params i915 __read_mostly;
+
+#endif
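
For context, each field of struct i915_params is paired with a module parameter in i915_params.c. An illustrative fragment of that wiring, in the usual module_param_named() style; the default value and description text here are placeholders, not necessarily the driver's:

#include <linux/module.h>
#include "i915_params.h"

struct i915_params i915 __read_mostly = {
	.modeset = -1,			/* placeholder default */
	.enable_hangcheck = true,
};

/* Expose the struct field as /sys/module/i915/parameters/modeset. */
module_param_named(modeset, i915.modeset, int, 0400);
MODULE_PARM_DESC(modeset, "Use kernel modesetting (placeholder help text)");

module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck, "Periodically check GPU activity for hangs.");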

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 007ae83a4086..0a988895165f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1711,6 +1711,11 @@ enum skl_disp_power_wells {
 #define FPGA_DBG		_MMIO(0x42300)
 #define   FPGA_DBG_RM_NOCLAIM	(1<<31)
 
+#define CLAIM_ER		_MMIO(VLV_DISPLAY_BASE + 0x2028)
+#define   CLAIM_ER_CLR		(1 << 31)
+#define   CLAIM_ER_OVERFLOW	(1 << 16)
+#define   CLAIM_ER_CTR_MASK	0xffff
+
 #define DERRMR		_MMIO(0x44050)
 /* Note that HBLANK events are reserved on bdw+ */
 #define   DERRMR_PIPEA_SCANLINE		(1<<0)

diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 37e3f0ddf8e0..c6188dddb341 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -164,7 +164,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
 	     struct bin_attribute *attr, char *buf,
 	     loff_t offset, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
@@ -200,7 +200,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	      struct bin_attribute *attr, char *buf,
 	      loff_t offset, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
@@ -521,7 +521,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *attr, char *buf,
 				loff_t off, size_t count)
 {
-	struct device *kdev = container_of(kobj, struct device, kobj);
+	struct device *kdev = kobj_to_dev(kobj);
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct i915_error_state_file_priv error_priv;
@@ -556,7 +556,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
 				 struct bin_attribute *attr, char *buf,
 				 loff_t off, size_t count)
 {
-	struct device *kdev = container_of(kobj, struct device, kobj);
+	struct device *kdev = kobj_to_dev(kobj);
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	int ret;
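
For reference, kobj_to_dev() from <linux/device.h> is just a typed wrapper around the container_of() pattern being replaced, so these hunks change no behaviour:

static inline struct device *kobj_to_dev(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}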

diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index d0b1c9afa35e..4625f8a9ba12 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -308,5 +308,5 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
 {
 	struct intel_atomic_state *state = to_intel_atomic_state(s);
 	drm_atomic_state_default_clear(&state->base);
-	state->dpll_set = false;
+	state->dpll_set = state->modeset = false;
 }

diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index c6bb0fc1edfb..e0b851a0004a 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -152,9 +152,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
 	intel_state->clip.x1 = 0;
 	intel_state->clip.y1 = 0;
 	intel_state->clip.x2 =
-		crtc_state->base.active ? crtc_state->pipe_src_w : 0;
+		crtc_state->base.enable ? crtc_state->pipe_src_w : 0;
 	intel_state->clip.y2 =
-		crtc_state->base.active ? crtc_state->pipe_src_h : 0;
+		crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
 
 	if (state->fb && intel_rotation_90_or_270(state->rotation)) {
 		if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -194,8 +194,16 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct intel_plane_state *intel_state =
 		to_intel_plane_state(plane->state);
+	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+	struct drm_crtc_state *crtc_state =
+		drm_atomic_get_existing_crtc_state(old_state->state, crtc);
 
-	intel_plane->commit_plane(plane, intel_state);
+	if (intel_state->visible)
+		intel_plane->update_plane(plane,
+					  to_intel_crtc_state(crtc_state),
+					  intel_state);
+	else
+		intel_plane->disable_plane(plane, crtc);
 }
 
 const struct drm_plane_helper_funcs intel_plane_helper_funcs = {

diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index eba3e0f87181..bf62a19c8f69 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -31,11 +31,49 @@
 #include "i915_drv.h"
 #include "intel_bios.h"
 
+/**
+ * DOC: Video BIOS Table (VBT)
+ *
+ * The Video BIOS Table, or VBT, provides platform and board specific
+ * configuration information to the driver that is not discoverable or available
+ * through other means. The configuration is mostly related to display
+ * hardware. The VBT is available via the ACPI OpRegion or, on older systems, in
+ * the PCI ROM.
+ *
+ * The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB
+ * Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that
+ * contain the actual configuration information. The VBT Header, and thus the
+ * VBT, begins with "$VBT" signature. The VBT Header contains the offset of the
+ * BDB Header. The data blocks are concatenated after the BDB Header. The data
+ * blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes of
+ * data. (Block 53, the MIPI Sequence Block is an exception.)
+ *
+ * The driver parses the VBT during load. The relevant information is stored in
+ * driver private data for ease of use, and the actual VBT is not read after
+ * that.
+ */
+
 #define SLAVE_ADDR1	0x70
 #define SLAVE_ADDR2	0x72
 
 static int panel_type;
 
+/* Get BDB block size given a pointer to Block ID. */
+static u32 _get_blocksize(const u8 *block_base)
+{
+	/* The MIPI Sequence Block v3+ has a separate size field. */
+	if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3)
+		return *((const u32 *)(block_base + 4));
+	else
+		return *((const u16 *)(block_base + 1));
+}
+
+/* Get BDB block size give a pointer to data after Block ID and Block Size. */
+static u32 get_blocksize(const void *block_data)
+{
+	return _get_blocksize(block_data - 3);
+}
+
 static const void *
 find_section(const void *_bdb, int section_id)
 {
@@ -52,14 +90,8 @@ find_section(const void *_bdb, int section_id)
 	/* walk the sections looking for section_id */
 	while (index + 3 < total) {
 		current_id = *(base + index);
-		index++;
-
-		current_size = *((const u16 *)(base + index));
-		index += 2;
-
-		/* The MIPI Sequence Block v3+ has a separate size field. */
-		if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
-			current_size = *((const u32 *)(base + index + 1));
+		current_size = _get_blocksize(base + index);
+		index += 3;
 
 		if (index + current_size > total)
 			return NULL;
@@ -73,16 +105,6 @@ find_section(const void *_bdb, int section_id)
 	return NULL;
 }
 
-static u16
-get_blocksize(const void *p)
-{
-	u16 *block_ptr, block_size;
-
-	block_ptr = (u16 *)((char *)p - 2);
-	block_size = *block_ptr;
-	return block_size;
-}
-
 static void
 fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
			const struct lvds_dvo_timing *dvo_timing)
@@ -675,84 +697,13 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 	dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
 }
 
-static u8 *goto_next_sequence(u8 *data, int *size)
-{
-	u16 len;
-	int tmp = *size;
-
-	if (--tmp < 0)
-		return NULL;
-
-	/* goto first element */
-	data++;
-	while (1) {
-		switch (*data) {
-		case MIPI_SEQ_ELEM_SEND_PKT:
-			/*
-			 * skip by this element payload size
-			 * skip elem id, command flag and data type
-			 */
-			tmp -= 5;
-			if (tmp < 0)
-				return NULL;
-
-			data += 3;
-			len = *((u16 *)data);
-
-			tmp -= len;
-			if (tmp < 0)
-				return NULL;
-
-			/* skip by len */
-			data = data + 2 + len;
-			break;
-		case MIPI_SEQ_ELEM_DELAY:
-			/* skip by elem id, and delay is 4 bytes */
-			tmp -= 5;
-			if (tmp < 0)
-				return NULL;
-
-			data += 5;
-			break;
-		case MIPI_SEQ_ELEM_GPIO:
-			tmp -= 3;
-			if (tmp < 0)
-				return NULL;
-
-			data += 3;
-			break;
-		default:
-			DRM_ERROR("Unknown element\n");
-			return NULL;
-		}
-
-		/* end of sequence ? */
-		if (*data == 0)
-			break;
-	}
-
-	/* goto next sequence or end of block byte */
-	if (--tmp < 0)
-		return NULL;
-
-	data++;
-
-	/* update amount of data left for the sequence block to be parsed */
-	*size = tmp;
-	return data;
-}
-
 static void
-parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+parse_mipi_config(struct drm_i915_private *dev_priv,
+		  const struct bdb_header *bdb)
 {
 	const struct bdb_mipi_config *start;
-	const struct bdb_mipi_sequence *sequence;
 	const struct mipi_config *config;
 	const struct mipi_pps_data *pps;
-	u8 *data;
-	const u8 *seq_data;
-	int i, panel_id, seq_size;
-	u16 block_size;
 
 	/* parse MIPI blocks only if LFP type is MIPI */
 	if (!dev_priv->vbt.has_mipi)
@@ -798,104 +749,233 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 
 	/* We have mandatory mipi config blocks. Initialize as generic panel */
 	dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+}
 
-	/* Check if we have sequence block as well */
-	sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
-	if (!sequence) {
-		DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
-		return;
+/* Find the sequence block and size for the given panel. */
+static const u8 *
+find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
			  u16 panel_id, u32 *seq_size)
+{
+	u32 total = get_blocksize(sequence);
+	const u8 *data = &sequence->data[0];
+	u8 current_id;
+	u32 current_size;
+	int header_size = sequence->version >= 3 ? 5 : 3;
+	int index = 0;
+	int i;
+
+	/* skip new block size */
+	if (sequence->version >= 3)
+		data += 4;
+
+	for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
+		if (index + header_size > total) {
+			DRM_ERROR("Invalid sequence block (header)\n");
+			return NULL;
+		}
+
+		current_id = *(data + index);
+		if (sequence->version >= 3)
+			current_size = *((const u32 *)(data + index + 1));
+		else
+			current_size = *((const u16 *)(data + index + 1));
+
+		index += header_size;
+
+		if (index + current_size > total) {
+			DRM_ERROR("Invalid sequence block\n");
+			return NULL;
+		}
+
+		if (current_id == panel_id) {
+			*seq_size = current_size;
+			return data + index;
+		}
+
+		index += current_size;
 	}
 
-	/* Fail gracefully for forward incompatible sequence block. */
-	if (sequence->version >= 3) {
-		DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
-		return;
+	DRM_ERROR("Sequence block detected but no valid configuration\n");
+
+	return NULL;
+}
+
+static int goto_next_sequence(const u8 *data, int index, int total)
+{
+	u16 len;
+
+	/* Skip Sequence Byte. */
+	for (index = index + 1; index < total; index += len) {
+		u8 operation_byte = *(data + index);
+		index++;
+
+		switch (operation_byte) {
+		case MIPI_SEQ_ELEM_END:
+			return index;
+		case MIPI_SEQ_ELEM_SEND_PKT:
+			if (index + 4 > total)
+				return 0;
+
+			len = *((const u16 *)(data + index + 2)) + 4;
+			break;
+		case MIPI_SEQ_ELEM_DELAY:
+			len = 4;
+			break;
+		case MIPI_SEQ_ELEM_GPIO:
+			len = 2;
+			break;
+		case MIPI_SEQ_ELEM_I2C:
+			if (index + 7 > total)
+				return 0;
+			len = *(data + index + 6) + 7;
+			break;
+		default:
+			DRM_ERROR("Unknown operation byte\n");
+			return 0;
+		}
 	}
 
-	DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
+	return 0;
+}
 
-	block_size = get_blocksize(sequence);
+static int goto_next_sequence_v3(const u8 *data, int index, int total)
+{
+	int seq_end;
+	u16 len;
+	u32 size_of_sequence;
 
 	/*
-	 * parse the sequence block for individual sequences
+	 * Could skip sequence based on Size of Sequence alone, but also do some
+	 * checking on the structure.
 	 */
-	dev_priv->vbt.dsi.seq_version = sequence->version;
+	if (total < 5) {
+		DRM_ERROR("Too small sequence size\n");
+		return 0;
+	}
 
-	seq_data = &sequence->data[0];
+	/* Skip Sequence Byte. */
+	index++;
 
 	/*
-	 * sequence block is variable length and hence we need to parse and
-	 * get the sequence data for specific panel id
+	 * Size of Sequence. Excludes the Sequence Byte and the size itself,
+	 * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
+	 * byte.
 	 */
-	for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
-		panel_id = *seq_data;
-		seq_size = *((u16 *) (seq_data + 1));
-		if (panel_id == panel_type)
-			break;
+	size_of_sequence = *((const uint32_t *)(data + index));
+	index += 4;
 
-		/* skip the sequence including seq header of 3 bytes */
-		seq_data = seq_data + 3 + seq_size;
-		if ((seq_data - &sequence->data[0]) > block_size) {
-			DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
-			return;
+	seq_end = index + size_of_sequence;
+	if (seq_end > total) {
+		DRM_ERROR("Invalid sequence size\n");
+		return 0;
+	}
+
+	for (; index < total; index += len) {
+		u8 operation_byte = *(data + index);
+		index++;
+
+		if (operation_byte == MIPI_SEQ_ELEM_END) {
+			if (index != seq_end) {
+				DRM_ERROR("Invalid element structure\n");
+				return 0;
+			}
+			return index;
+		}
+
+		len = *(data + index);
+		index++;
+
+		/*
+		 * FIXME: Would be nice to check elements like for v1/v2 in
+		 * goto_next_sequence() above.
+		 */
+		switch (operation_byte) {
+		case MIPI_SEQ_ELEM_SEND_PKT:
+		case MIPI_SEQ_ELEM_DELAY:
+		case MIPI_SEQ_ELEM_GPIO:
+		case MIPI_SEQ_ELEM_I2C:
+		case MIPI_SEQ_ELEM_SPI:
+		case MIPI_SEQ_ELEM_PMIC:
+			break;
+		default:
+			DRM_ERROR("Unknown operation byte %u\n",
				  operation_byte);
+			break;
 		}
 	}
 
-	if (i == MAX_MIPI_CONFIGURATIONS) {
-		DRM_ERROR("Sequence block detected but no valid configuration\n");
+	return 0;
+}
+
+static void
+parse_mipi_sequence(struct drm_i915_private *dev_priv,
		    const struct bdb_header *bdb)
+{
+	const struct bdb_mipi_sequence *sequence;
+	const u8 *seq_data;
+	u32 seq_size;
+	u8 *data;
+	int index = 0;
+
+	/* Only our generic panel driver uses the sequence block. */
+	if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
+		return;
+
+	sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
+	if (!sequence) {
+		DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
 		return;
 	}
 
-	/* check if found sequence is completely within the sequence block
-	 * just being paranoid */
-	if (seq_size > block_size) {
-		DRM_ERROR("Corrupted sequence/size, bailing out\n");
+	/* Fail gracefully for forward incompatible sequence block. */
+	if (sequence->version >= 4) {
+		DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
			  sequence->version);
 		return;
 	}
 
-	/* skip the panel id(1 byte) and seq size(2 bytes) */
-	dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
-	if (!dev_priv->vbt.dsi.data)
+	DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
+
+	seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
+	if (!seq_data)
 		return;
 
-	/*
-	 * loop into the sequence data and split into multiple sequneces
-	 * There are only 5 types of sequences as of now
-	 */
-	data = dev_priv->vbt.dsi.data;
-	dev_priv->vbt.dsi.size = seq_size;
+	data = kmemdup(seq_data, seq_size, GFP_KERNEL);
+	if (!data)
+		return;
 
-	/* two consecutive 0x00 indicate end of all sequences */
-	while (1) {
-		int seq_id = *data;
-		if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
-			dev_priv->vbt.dsi.sequence[seq_id] = data;
-			DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
-		} else {
-			DRM_ERROR("undefined sequence\n");
+	/* Parse the sequences, store pointers to each sequence. */
+	for (;;) {
+		u8 seq_id = *(data + index);
+		if (seq_id == MIPI_SEQ_END)
+			break;
+
+		if (seq_id >= MIPI_SEQ_MAX) {
+			DRM_ERROR("Unknown sequence %u\n", seq_id);
 			goto err;
 		}
 
-		/* partial parsing to skip elements */
-		data = goto_next_sequence(data, &seq_size);
+		dev_priv->vbt.dsi.sequence[seq_id] = data + index;
 
-		if (data == NULL) {
-			DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
+		if (sequence->version >= 3)
+			index = goto_next_sequence_v3(data, index, seq_size);
+		else
+			index = goto_next_sequence(data, index, seq_size);
+		if (!index) {
+			DRM_ERROR("Invalid sequence %u\n", seq_id);
 			goto err;
 		}
-
-		if (*data == 0)
-			break; /* end of sequence reached */
 	}
 
-	DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
+	dev_priv->vbt.dsi.data = data;
+	dev_priv->vbt.dsi.size = seq_size;
+	dev_priv->vbt.dsi.seq_version = sequence->version;
+
+	DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
 	return;
-err:
-	kfree(dev_priv->vbt.dsi.data);
-	dev_priv->vbt.dsi.data = NULL;
 
-	/* error during parsing so set all pointers to null
-	 * because of partial parsing */
+err:
+	kfree(data);
 	memset(dev_priv->vbt.dsi.sequence, 0,
	       sizeof(dev_priv->vbt.dsi.sequence));
 }
@@ -1088,7 +1168,12 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
 		return;
 	}
-	if (bdb->version < 195) {
+	if (bdb->version < 106) {
+		expected_size = 22;
+	} else if (bdb->version < 109) {
+		expected_size = 27;
+	} else if (bdb->version < 195) {
+		BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
 		expected_size = sizeof(struct old_child_dev_config);
 	} else if (bdb->version == 195) {
 		expected_size = 37;
@@ -1101,18 +1186,18 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
			  bdb->version, expected_size);
 	}
 
-	/* The legacy sized child device config is the minimum we need. */
-	if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
-		DRM_ERROR("Child device config size %u is too small.\n",
			  p_defs->child_dev_size);
-		return;
-	}
-
 	/* Flag an error for unexpected size, but continue anyway. */
 	if (p_defs->child_dev_size != expected_size)
 		DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
			  p_defs->child_dev_size, expected_size, bdb->version);
 
+	/* The legacy sized child device config is the minimum we need. */
+	if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+		DRM_DEBUG_KMS("Child device config size %u is too small.\n",
			      p_defs->child_dev_size);
+		return;
+	}
+
 	/* get the block size of general definitions */
 	block_size = get_blocksize(p_defs);
 	/* get the number of child device */
@@ -1285,7 +1370,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
 
 /**
  * intel_bios_init - find VBT and initialize settings from the BIOS
- * @dev: DRM device
+ * @dev_priv: i915 device instance
  *
  * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
  * to appropriate values.
@@ -1337,7 +1422,8 @@ intel_bios_init(struct drm_i915_private *dev_priv)
 	parse_driver_features(dev_priv, bdb);
 	parse_edp(dev_priv, bdb);
 	parse_psr(dev_priv, bdb);
-	parse_mipi(dev_priv, bdb);
+	parse_mipi_config(dev_priv, bdb);
+	parse_mipi_sequence(dev_priv, bdb);
 	parse_ddi_ports(dev_priv, bdb);
 
 	if (bios)

diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 54eac1003a1e..350d4e0f75a4 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -25,25 +25,43 @@
  *
  */
 
-#ifndef _I830_BIOS_H_
-#define _I830_BIOS_H_
-
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
+
+/**
+ * struct vbt_header - VBT Header structure
+ * @signature:		VBT signature, always starts with "$VBT"
+ * @version:		Version of this structure
+ * @header_size:	Size of this structure
+ * @vbt_size:		Size of VBT (VBT Header, BDB Header and data blocks)
+ * @vbt_checksum:	Checksum
+ * @reserved0:		Reserved
+ * @bdb_offset:		Offset of &struct bdb_header from beginning of VBT
+ * @aim_offset:		Offsets of add-in data blocks from beginning of VBT
+ */
 struct vbt_header {
-	u8 signature[20];		/**< Always starts with 'VBT$' */
-	u16 version;			/**< decimal */
-	u16 header_size;		/**< in bytes */
-	u16 vbt_size;			/**< in bytes */
+	u8 signature[20];
+	u16 version;
+	u16 header_size;
+	u16 vbt_size;
 	u8 vbt_checksum;
 	u8 reserved0;
-	u32 bdb_offset;			/**< from beginning of VBT */
-	u32 aim_offset[4];		/**< from beginning of VBT */
+	u32 bdb_offset;
+	u32 aim_offset[4];
 } __packed;
 
+/**
+ * struct bdb_header - BDB Header structure
+ * @signature:		BDB signature "BIOS_DATA_BLOCK"
+ * @version:		Version of the data block definitions
+ * @header_size:	Size of this structure
+ * @bdb_size:		Size of BDB (BDB Header and data blocks)
+ */
 struct bdb_header {
-	u8 signature[16];		/**< Always 'BIOS_DATA_BLOCK' */
-	u16 version;			/**< decimal */
-	u16 header_size;		/**< in bytes */
-	u16 bdb_size;			/**< in bytes */
+	u8 signature[16];
+	u16 version;
+	u16 header_size;
+	u16 bdb_size;
 } __packed;
 
 /* strictly speaking, this is a "skip" block, but it has interesting info */
@@ -936,21 +954,29 @@ struct bdb_mipi_sequence {
 
 /* MIPI Sequnece Block definitions */
 enum mipi_seq {
-	MIPI_SEQ_UNDEFINED = 0,
+	MIPI_SEQ_END = 0,
 	MIPI_SEQ_ASSERT_RESET,
 	MIPI_SEQ_INIT_OTP,
 	MIPI_SEQ_DISPLAY_ON,
 	MIPI_SEQ_DISPLAY_OFF,
 	MIPI_SEQ_DEASSERT_RESET,
+	MIPI_SEQ_BACKLIGHT_ON,		/* sequence block v2+ */
+	MIPI_SEQ_BACKLIGHT_OFF,		/* sequence block v2+ */
+	MIPI_SEQ_TEAR_ON,		/* sequence block v2+ */
+	MIPI_SEQ_TEAR_OFF,		/* sequence block v3+ */
+	MIPI_SEQ_POWER_ON,		/* sequence block v3+ */
+	MIPI_SEQ_POWER_OFF,		/* sequence block v3+ */
 	MIPI_SEQ_MAX
 };
 
 enum mipi_seq_element {
-	MIPI_SEQ_ELEM_UNDEFINED = 0,
+	MIPI_SEQ_ELEM_END = 0,
 	MIPI_SEQ_ELEM_SEND_PKT,
 	MIPI_SEQ_ELEM_DELAY,
 	MIPI_SEQ_ELEM_GPIO,
-	MIPI_SEQ_ELEM_STATUS,
+	MIPI_SEQ_ELEM_I2C,		/* sequence block v2+ */
+	MIPI_SEQ_ELEM_SPI,		/* sequence block v3+ */
+	MIPI_SEQ_ELEM_PMIC,		/* sequence block v3+ */
 	MIPI_SEQ_ELEM_MAX
 };
 
@@ -965,4 +991,4 @@ enum mipi_gpio_pin_index {
 	MIPI_GPIO_MAX
 };
 
-#endif /* _I830_BIOS_H_ */
+#endif /* _INTEL_BIOS_H_ */
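Given the two headers documented above, resolving the BDB from a raw VBT buffer is a two-step hop. A sketch under the stated layout (the real find_vbt() validates far more than this):

static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
{
	const u8 *base = (const u8 *)vbt;

	/* The VBT, and thus its header, must begin with "$VBT". */
	if (memcmp(vbt->signature, "$VBT", 4) != 0)
		return NULL;

	return (const struct bdb_header *)(base + vbt->bdb_offset);
}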

diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 9bb63a85997a..5c2f9a40c81b 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -44,6 +44,8 @@
 #define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
 #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
 
+#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
+
 MODULE_FIRMWARE(I915_CSR_SKL);
 MODULE_FIRMWARE(I915_CSR_BXT);
 
@@ -278,10 +280,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
 	csr->version = css_header->version;
 
-	if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
+	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
+	    csr->version < SKL_CSR_VERSION_REQUIRED) {
 		DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
			 " please upgrade to v%u.%u or later"
-			 " [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
+			 " [" FIRMWARE_URL "].\n",
			 CSR_VERSION_MAJOR(csr->version),
			 CSR_VERSION_MINOR(csr->version),
			 CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
@@ -399,7 +402,10 @@ out:
			 CSR_VERSION_MAJOR(csr->version),
			 CSR_VERSION_MINOR(csr->version));
 	} else {
-		DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
+		dev_notice(dev_priv->dev->dev,
			   "Failed to load DMC firmware"
			   " [" FIRMWARE_URL "],"
			   " disabling runtime power management.\n");
 	}
 
 	release_firmware(fw);
@@ -421,7 +427,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
 	if (!HAS_CSR(dev_priv))
 		return;
 
-	if (IS_SKYLAKE(dev_priv))
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		csr->fw_path = I915_CSR_SKL;
 	else if (IS_BROXTON(dev_priv))
 		csr->fw_path = I915_CSR_BXT;

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e6408e5583d7..1f9a3687b540 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -133,38 +133,38 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
 	{ 0x00002016, 0x000000A0, 0x0 },
 	{ 0x00005012, 0x0000009B, 0x0 },
 	{ 0x00007011, 0x00000088, 0x0 },
-	{ 0x80009010, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80009010, 0x000000C0, 0x1 },
 	{ 0x00002016, 0x0000009B, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x80007011, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80007011, 0x000000C0, 0x1 },
 	{ 0x00002016, 0x000000DF, 0x0 },
-	{ 0x80005012, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80005012, 0x000000C0, 0x1 },
 };
 
 /* Skylake U */
 static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 	{ 0x0000201B, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x00007011, 0x00000087, 0x0 },
-	{ 0x80009010, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80007011, 0x000000CD, 0x0 },
+	{ 0x80009010, 0x000000C0, 0x1 },
 	{ 0x0000201B, 0x0000009D, 0x0 },
-	{ 0x80005012, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
-	{ 0x80007011, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80005012, 0x000000C0, 0x1 },
+	{ 0x80007011, 0x000000C0, 0x1 },
 	{ 0x00002016, 0x00000088, 0x0 },
-	{ 0x80005012, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80005012, 0x000000C0, 0x1 },
 };
 
 /* Skylake Y */
 static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
 	{ 0x00000018, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x00007011, 0x00000087, 0x0 },
-	{ 0x80009010, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x80007011, 0x000000CD, 0x0 },
+	{ 0x80009010, 0x000000C0, 0x3 },
 	{ 0x00000018, 0x0000009D, 0x0 },
-	{ 0x80005012, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
-	{ 0x80007011, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x80005012, 0x000000C0, 0x3 },
+	{ 0x80007011, 0x000000C0, 0x3 },
 	{ 0x00000018, 0x00000088, 0x0 },
-	{ 0x80005012, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x80005012, 0x000000C0, 0x3 },
 };
 
 /*
@@ -226,26 +226,26 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
 	{ 0x00000018, 0x000000A1, 0x0 },
 	{ 0x00000018, 0x00000098, 0x0 },
 	{ 0x00004013, 0x00000088, 0x0 },
-	{ 0x00006012, 0x00000087, 0x0 },
+	{ 0x80006012, 0x000000CD, 0x1 },
 	{ 0x00000018, 0x000000DF, 0x0 },
-	{ 0x00003015, 0x00000087, 0x0 },	/* Default */
-	{ 0x00003015, 0x000000C7, 0x0 },
-	{ 0x00000018, 0x000000C7, 0x0 },
+	{ 0x80003015, 0x000000CD, 0x1 },	/* Default */
+	{ 0x80003015, 0x000000C0, 0x1 },
+	{ 0x80000018, 0x000000C0, 0x1 },
 };
 
 /* Skylake Y */
 static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
 	{ 0x00000018, 0x000000A1, 0x0 },
 	{ 0x00005012, 0x000000DF, 0x0 },
-	{ 0x00007011, 0x00000084, 0x0 },
+	{ 0x80007011, 0x000000CB, 0x3 },
 	{ 0x00000018, 0x000000A4, 0x0 },
 	{ 0x00000018, 0x0000009D, 0x0 },
 	{ 0x00004013, 0x00000080, 0x0 },
-	{ 0x00006013, 0x000000C7, 0x0 },
+	{ 0x80006013, 0x000000C0, 0x3 },
 	{ 0x00000018, 0x0000008A, 0x0 },
-	{ 0x00003015, 0x000000C7, 0x0 },	/* Default */
-	{ 0x80003015, 0x000000C7, 0x7 },	/* Uses I_boost level 0x7 */
-	{ 0x00000018, 0x000000C7, 0x0 },
+	{ 0x80003015, 0x000000C0, 0x3 },	/* Default */
+	{ 0x80003015, 0x000000C0, 0x3 },
+	{ 0x80000018, 0x000000C0, 0x3 },
 };
 
 struct bxt_ddi_buf_trans {
@@ -301,8 +301,8 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
 	{ 154, 0x9A, 1, 128, true },	/* 9:	1200		0   */
 };
 
-static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
				    enum port port, int type);
+static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
				    u32 level, enum port port, int type);
 
 static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
				 struct intel_digital_port **dig_port,
@@ -342,81 +342,50 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
 	return port;
 }
 
-static bool
-intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
-{
-	return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
-}
-
-static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
							 int *n_entries)
+static const struct ddi_buf_trans *
+skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	const struct ddi_buf_trans *ddi_translations;
-
-	if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
-		ddi_translations = skl_y_ddi_translations_dp;
+	if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
 		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
-	} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
-		ddi_translations = skl_u_ddi_translations_dp;
+		return skl_y_ddi_translations_dp;
+	} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
 		*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+		return skl_u_ddi_translations_dp;
 	} else {
-		ddi_translations = skl_ddi_translations_dp;
 		*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+		return skl_ddi_translations_dp;
 	}
-
-	return ddi_translations;
 }
 
-static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
							 int *n_entries)
+static const struct ddi_buf_trans *
+skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const struct ddi_buf_trans *ddi_translations;
-
-	if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
-		if (dev_priv->edp_low_vswing) {
-			ddi_translations = skl_y_ddi_translations_edp;
+	if (dev_priv->edp_low_vswing) {
+		if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
-		} else {
-			ddi_translations = skl_y_ddi_translations_dp;
-			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
-		}
-	} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
-		if (dev_priv->edp_low_vswing) {
-			ddi_translations = skl_u_ddi_translations_edp;
+			return skl_y_ddi_translations_edp;
+		} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+			return skl_u_ddi_translations_edp;
		} else {
-			ddi_translations = skl_u_ddi_translations_dp;
-			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
-		}
-	} else {
-		if (dev_priv->edp_low_vswing) {
-			ddi_translations = skl_ddi_translations_edp;
			*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
-		} else {
-			ddi_translations = skl_ddi_translations_dp;
-			*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+			return skl_ddi_translations_edp;
		}
 	}
 
-	return ddi_translations;
+	return skl_get_buf_trans_dp(dev_priv, n_entries);
 }
 
 static const struct ddi_buf_trans *
-skl_get_buf_trans_hdmi(struct drm_device *dev,
		       int *n_entries)
+skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	const struct ddi_buf_trans *ddi_translations;
-
-	if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
-		ddi_translations = skl_y_ddi_translations_hdmi;
+	if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
 		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
+		return skl_y_ddi_translations_hdmi;
 	} else {
-		ddi_translations = skl_ddi_translations_hdmi;
 		*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+		return skl_ddi_translations_hdmi;
 	}
-
-	return ddi_translations;
 }
 
 /*
@@ -426,42 +395,52 @@ skl_get_buf_trans_hdmi(struct drm_device *dev,
  * in either FDI or DP modes only, as HDMI connections will work with both
  * of those
  */
-static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
				      bool supports_hdmi)
+void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 iboost_bit = 0;
 	int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
	    size;
-	int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+	int hdmi_level;
+	enum port port;
 	const struct ddi_buf_trans *ddi_translations_fdi;
 	const struct ddi_buf_trans *ddi_translations_dp;
 	const struct ddi_buf_trans *ddi_translations_edp;
 	const struct ddi_buf_trans *ddi_translations_hdmi;
 	const struct ddi_buf_trans *ddi_translations;
 
-	if (IS_BROXTON(dev)) {
-		if (!supports_hdmi)
+	port = intel_ddi_get_encoder_port(encoder);
+	hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+	if (IS_BROXTON(dev_priv)) {
+		if (encoder->type != INTEL_OUTPUT_HDMI)
			return;
 
		/* Vswing programming for HDMI */
-		bxt_ddi_vswing_sequence(dev, hdmi_level, port,
+		bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port,
					INTEL_OUTPUT_HDMI);
		return;
-	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+	}
+
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		ddi_translations_fdi = NULL;
		ddi_translations_dp =
-				skl_get_buf_trans_dp(dev, &n_dp_entries);
+				skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
		ddi_translations_edp =
-				skl_get_buf_trans_edp(dev, &n_edp_entries);
+				skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
		ddi_translations_hdmi =
-				skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
+				skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
		hdmi_default_entry = 8;
		/* If we're boosting the current, set bit 31 of trans1 */
		if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
		    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
			iboost_bit = 1<<31;
-	} else if (IS_BROADWELL(dev)) {
+
+		if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
			    port != PORT_A && port != PORT_E &&
			    n_edp_entries > 9))
+			n_edp_entries = 9;
+	} else if (IS_BROADWELL(dev_priv)) {
		ddi_translations_fdi = bdw_ddi_translations_fdi;
		ddi_translations_dp = bdw_ddi_translations_dp;
		ddi_translations_edp = bdw_ddi_translations_edp;
@@ -470,7 +449,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
		hdmi_default_entry = 7;
-	} else if (IS_HASWELL(dev)) {
+	} else if (IS_HASWELL(dev_priv)) {
		ddi_translations_fdi = hsw_ddi_translations_fdi;
		ddi_translations_dp = hsw_ddi_translations_dp;
		ddi_translations_edp = hsw_ddi_translations_dp;
@@ -490,30 +469,18 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
		hdmi_default_entry = 7;
 	}
 
-	switch (port) {
-	case PORT_A:
+	switch (encoder->type) {
+	case INTEL_OUTPUT_EDP:
		ddi_translations = ddi_translations_edp;
		size = n_edp_entries;
		break;
-	case PORT_B:
-	case PORT_C:
+	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_HDMI:
		ddi_translations = ddi_translations_dp;
		size = n_dp_entries;
		break;
-	case PORT_D:
-		if (intel_dp_is_edp(dev, PORT_D)) {
-			ddi_translations = ddi_translations_edp;
-			size = n_edp_entries;
-		} else {
-			ddi_translations = ddi_translations_dp;
-			size = n_dp_entries;
-		}
-		break;
-	case PORT_E:
-		if (ddi_translations_fdi)
-			ddi_translations = ddi_translations_fdi;
-		else
-			ddi_translations = ddi_translations_dp;
+	case INTEL_OUTPUT_ANALOG:
+		ddi_translations = ddi_translations_fdi;
		size = n_dp_entries;
		break;
	default:
@@ -527,7 +494,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
			   ddi_translations[i].trans2);
 	}
 
-	if (!supports_hdmi)
+	if (encoder->type != INTEL_OUTPUT_HDMI)
		return;
 
 	/* Choose a good default if VBT is badly populated */
@@ -542,37 +509,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
		   ddi_translations_hdmi[hdmi_level].trans2);
 }
 
-/* Program DDI buffers translations for DP. By default, program ports A-D in DP
- * mode and port E for FDI.
- */
-void intel_prepare_ddi(struct drm_device *dev)
-{
-	struct intel_encoder *intel_encoder;
-	bool visited[I915_MAX_PORTS] = { 0, };
-
-	if (!HAS_DDI(dev))
-		return;
-
-	for_each_intel_encoder(dev, intel_encoder) {
-		struct intel_digital_port *intel_dig_port;
-		enum port port;
-		bool supports_hdmi;
-
-		if (intel_encoder->type == INTEL_OUTPUT_DSI)
-			continue;
-
-		ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
-		if (visited[port])
-			continue;
-
-		supports_hdmi = intel_dig_port &&
-				intel_dig_port_supports_hdmi(intel_dig_port);
-
-		intel_prepare_ddi_buffers(dev, port, supports_hdmi);
-		visited[port] = true;
-	}
-}
-
 static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
				    enum port port)
 {
@@ -601,8 +537,14 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	u32 temp, i, rx_ctl_val;
 
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
+		intel_prepare_ddi_buffer(encoder);
+	}
+
 	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
@@ -2085,10 +2027,9 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
			   TRANS_CLK_SEL_DISABLED);
 }
 
-static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
			       enum port port, int type)
+static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
			       u32 level, enum port port, int type)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct ddi_buf_trans *ddi_translations;
 	uint8_t iboost;
 	uint8_t dp_iboost, hdmi_iboost;
@@ -2103,21 +2044,26 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
		if (dp_iboost) {
			iboost = dp_iboost;
		} else {
-			ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
+			ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
			iboost = ddi_translations[level].i_boost;
		}
 	} else if (type == INTEL_OUTPUT_EDP) {
		if (dp_iboost) {
			iboost = dp_iboost;
		} else {
-			ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
+			ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries);
+
+			if (WARN_ON(port != PORT_A &&
				    port != PORT_E && n_entries > 9))
+				n_entries = 9;
+
			iboost = ddi_translations[level].i_boost;
		}
 	} else if (type == INTEL_OUTPUT_HDMI) {
		if (hdmi_iboost) {
			iboost = hdmi_iboost;
		} else {
-			ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
+			ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries);
			iboost = ddi_translations[level].i_boost;
		}
 	} else {
@@ -2142,10 +2088,9 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
 	I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
 }
 
-static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
				    enum port port, int type)
+static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
				    u32 level, enum port port, int type)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct bxt_ddi_buf_trans *ddi_translations;
 	u32 n_entries, i;
 	uint32_t val;
@@ -2260,7 +2205,7 @@ static uint32_t translate_signal_level(int signal_levels)
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dport->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
 	struct intel_encoder *encoder = &dport->base;
 	uint8_t train_set = intel_dp->train_set[0];
 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
@@ -2270,10 +2215,10 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 
 	level = translate_signal_level(signal_levels);
 
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-		skl_ddi_set_iboost(dev, level, port, encoder->type);
-	else if (IS_BROXTON(dev))
-		bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+		skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
+	else if (IS_BROXTON(dev_priv))
+		bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
 
 	return DDI_BUF_TRANS_SELECT(level);
 }
@@ -2325,12 +2270,12 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
 static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
 	enum port port = intel_ddi_get_encoder_port(intel_encoder);
 	int type = intel_encoder->type;
-	int hdmi_level;
+
+	intel_prepare_ddi_buffer(intel_encoder);
 
 	if (type == INTEL_OUTPUT_EDP) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2348,17 +2293,11 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 
		intel_dp_start_link_train(intel_dp);
-		if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
+		if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
			intel_dp_stop_link_train(intel_dp);
 	} else if (type == INTEL_OUTPUT_HDMI) {
		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
-		if (IS_BROXTON(dev)) {
-			hdmi_level = dev_priv->vbt.
-				ddi_port_info[port].hdmi_level_shift;
-			bxt_ddi_vswing_sequence(dev, hdmi_level, port,
-						INTEL_OUTPUT_HDMI);
-		}
		intel_hdmi->set_infoframes(encoder,
					   crtc->config->has_hdmi_sink,
					   &crtc->config->base.adjusted_mode);
@@ -3282,6 +3221,33 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	struct intel_encoder *intel_encoder;
 	struct drm_encoder *encoder;
 	bool init_hdmi, init_dp;
+	int max_lanes;
+
+	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
+		switch (port) {
+		case PORT_A:
+			max_lanes = 4;
+			break;
+		case PORT_E:
+			max_lanes = 0;
+			break;
+		default:
+			max_lanes = 4;
+			break;
+		}
+	} else {
+		switch (port) {
+		case PORT_A:
+			max_lanes = 2;
+			break;
+		case PORT_E:
+			max_lanes = 2;
+			break;
+		default:
+			max_lanes = 4;
+			break;
+		}
+	}
 
 	init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
@@ -3315,6 +3281,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
					  (DDI_BUF_PORT_REVERSAL |
					   DDI_A_4_LANES);
+	intel_dig_port->max_lanes = max_lanes;
 
 	/*
	 * Bspec says that DDI_A_4_LANES is the only supported configuration
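The two max_lanes switch statements in intel_ddi_init() collapse to a small decision table: DDI A and E share lanes, and the DDI_A_4_LANES strap on port A decides the split. A condensed, hypothetical equivalent of the same logic:

static int ddi_max_lanes(struct drm_i915_private *dev_priv, enum port port)
{
	bool ddi_a_4_lanes = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES;

	if (port == PORT_A)
		return ddi_a_4_lanes ? 4 : 2;	/* A takes all shared lanes, or half */
	if (port == PORT_E)
		return ddi_a_4_lanes ? 0 : 2;	/* E gets what A leaves behind */
	return 4;				/* B/C/D always have 4 lanes */
}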

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5feb65725c04..8104511ad302 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -85,8 +85,6 @@ static const uint32_t intel_cursor_formats[] = {
 	DRM_FORMAT_ARGB8888,
 };
 
-static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
-
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
@@ -1152,11 +1150,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
 	}
 }
 
-static const char *state_string(bool enabled)
-{
-	return enabled ? "on" : "off";
-}
-
 /* Only for pre-ILK configs */
 void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
@@ -1168,7 +1161,7 @@ void assert_pll(struct drm_i915_private *dev_priv,
 	cur_state = !!(val & DPLL_VCO_ENABLE);
 	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
-			state_string(state), state_string(cur_state));
+			onoff(state), onoff(cur_state));
 }
 
 /* XXX: the dsi pll is shared between MIPI DSI ports */
@@ -1184,7 +1177,7 @@ static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
 	cur_state = val & DSI_PLL_VCO_EN;
 	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
-			state_string(state), state_string(cur_state));
+			onoff(state), onoff(cur_state));
 }
 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
@@ -1208,14 +1201,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 	bool cur_state;
 	struct intel_dpll_hw_state hw_state;
 
-	if (WARN (!pll,
-		  "asserting DPLL %s with no DPLL\n", state_string(state)))
+	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;
 
 	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
 	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
-			pll->name, state_string(state), state_string(cur_state));
+			pll->name, onoff(state), onoff(cur_state));
 }
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
@@ -1235,7 +1227,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
 	}
 	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
-			state_string(state), state_string(cur_state));
+			onoff(state), onoff(cur_state));
 }
 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
@@ -1250,7 +1242,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
 	cur_state = !!(val & FDI_RX_ENABLE);
 	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected