2018-09-19 00:21:57 +02:00
# include " stdafx.h "
2020-03-22 11:20:31 +01:00
# include "../Overlays/overlay_shader_compile_notification.h"
# include "../Overlays/Shaders/shader_loading_dialog_native.h"
2015-10-26 22:09:31 +01:00
# include "GLGSRender.h"
2019-10-02 02:47:19 +02:00
# include "GLCompute.h"
2016-10-18 09:57:28 +02:00
# include "GLVertexProgram.h"
2012-11-15 00:39:56 +01:00
2013-08-27 17:18:01 +02:00
# define DUMP_VERTEX_DATA 0
2012-11-15 00:39:56 +01:00
2016-01-13 17:40:10 +01:00
namespace
{
2016-01-20 18:12:48 +01:00
u32 get_max_depth_value ( rsx : : surface_depth_format format )
2016-01-13 17:40:10 +01:00
{
switch ( format )
{
2016-01-20 18:12:48 +01:00
case rsx : : surface_depth_format : : z16 : return 0xFFFF ;
case rsx : : surface_depth_format : : z24s8 : return 0xFFFFFF ;
2016-01-13 17:40:10 +01:00
}
2016-08-08 18:01:06 +02:00
fmt : : throw_exception ( " Unknown depth format " HERE ) ;
2016-06-30 06:46:25 +02:00
}
2016-01-13 17:40:10 +01:00
}
2018-10-11 00:17:19 +02:00
// Reports the CPU cycle count consumed by this RSX thread (used by perf overlays).
u64 GLGSRender::get_cycles()
{
	auto& rsx_thread = static_cast<named_thread<GLGSRender>&>(*this);
	return thread_ctrl::get_cycles(rsx_thread);
}
2017-05-20 13:45:02 +02:00
GLGSRender::GLGSRender() : GSRender()
{
	m_shaders_cache = std::make_unique<gl::shader_cache>(m_prog_buffer, "opengl", "v1.91");

	// A real (weak) vertex cache is only safe when the cache is enabled and
	// RSX runs single-threaded; otherwise fall back to the null implementation.
	const bool no_vertex_cache = g_cfg.video.disable_vertex_cache || g_cfg.video.multithreaded_rsx;
	if (no_vertex_cache)
	{
		m_vertex_cache = std::make_unique<gl::null_vertex_cache>();
	}
	else
	{
		m_vertex_cache = std::make_unique<gl::weak_vertex_cache>();
	}

	// Baseline backend capabilities; refined later during on_init_thread
	backend_config.supports_hw_a2c = false;
	backend_config.supports_hw_a2one = false;
	backend_config.supports_multidraw = true;
}
2014-08-23 02:16:54 +02:00
2015-10-11 22:00:51 +02:00
extern CellGcmContextData current_context ;
2016-01-06 00:15:35 +01:00
void GLGSRender : : set_viewport ( )
{
2018-10-28 13:20:53 +01:00
// NOTE: scale offset matrix already contains the viewport transformation
2017-09-26 15:24:43 +02:00
const auto clip_width = rsx : : apply_resolution_scale ( rsx : : method_registers . surface_clip_width ( ) , true ) ;
const auto clip_height = rsx : : apply_resolution_scale ( rsx : : method_registers . surface_clip_height ( ) , true ) ;
2017-07-05 00:16:59 +02:00
glViewport ( 0 , 0 , clip_width , clip_height ) ;
2018-10-28 13:20:53 +01:00
}
2019-07-20 13:58:05 +02:00
void GLGSRender : : set_scissor ( bool clip_viewport )
2018-10-28 13:20:53 +01:00
{
2019-07-18 15:50:21 +02:00
areau scissor ;
2019-07-20 13:58:05 +02:00
if ( get_scissor ( scissor , clip_viewport ) )
2018-10-28 13:20:53 +01:00
{
2019-07-18 15:50:21 +02:00
// NOTE: window origin does not affect scissor region (probably only affects viewport matrix; already applied)
// See LIMBO [NPUB-30373] which uses shader window origin = top
glScissor ( scissor . x1 , scissor . y1 , scissor . width ( ) , scissor . height ( ) ) ;
gl_state . enable ( GL_TRUE , GL_SCISSOR_TEST ) ;
2018-10-28 13:20:53 +01:00
}
2016-01-06 00:15:35 +01:00
}
2015-11-26 09:06:29 +01:00
void GLGSRender : : on_init_thread ( )
2015-10-09 20:04:20 +02:00
{
2018-07-11 22:51:29 +02:00
verify ( HERE ) , m_frame ;
// NOTES: All contexts have to be created before any is bound to a thread
// This allows context sharing to work (both GLRCs passed to wglShareLists have to be idle or you get ERROR_BUSY)
m_context = m_frame - > make_context ( ) ;
2018-07-31 11:05:13 +02:00
if ( ! g_cfg . video . disable_asynchronous_shader_compiler )
{
m_decompiler_context = m_frame - > make_context ( ) ;
}
2018-07-11 22:51:29 +02:00
// Bind primary context to main RSX thread
m_frame - > set_current ( m_context ) ;
2020-04-05 13:16:57 +02:00
gl : : set_primary_context_thread ( ) ;
2018-07-11 22:51:29 +02:00
2018-03-05 12:09:43 +01:00
zcull_ctrl . reset ( static_cast < : : rsx : : reports : : ZCULL_control * > ( this ) ) ;
2015-10-11 22:00:51 +02:00
gl : : init ( ) ;
2017-04-04 18:14:36 +02:00
2017-08-13 19:41:55 +02:00
//Enable adaptive vsync if vsync is requested
gl : : set_swapinterval ( g_cfg . video . vsync ? - 1 : 0 ) ;
2017-05-20 13:45:02 +02:00
if ( g_cfg . video . debug_output )
2016-03-22 22:26:37 +01:00
gl : : enable_debugging ( ) ;
2017-04-04 18:14:36 +02:00
2020-02-01 09:07:25 +01:00
rsx_log . notice ( " GL RENDERER: %s (%s) " , reinterpret_cast < const char * > ( glGetString ( GL_RENDERER ) ) , reinterpret_cast < const char * > ( glGetString ( GL_VENDOR ) ) ) ;
rsx_log . notice ( " GL VERSION: %s " , reinterpret_cast < const char * > ( glGetString ( GL_VERSION ) ) ) ;
rsx_log . notice ( " GLSL VERSION: %s " , reinterpret_cast < const char * > ( glGetString ( GL_SHADING_LANGUAGE_VERSION ) ) ) ;
2015-10-11 22:00:51 +02:00
2017-06-19 12:47:38 +02:00
auto & gl_caps = gl : : get_driver_caps ( ) ;
2017-04-04 18:14:36 +02:00
if ( ! gl_caps . ARB_texture_buffer_supported )
{
fmt : : throw_exception ( " Failed to initialize OpenGL renderer. ARB_texture_buffer_object is required but not supported by your GPU " ) ;
}
if ( ! gl_caps . ARB_dsa_supported & & ! gl_caps . EXT_dsa_supported )
{
fmt : : throw_exception ( " Failed to initialize OpenGL renderer. ARB_direct_state_access or EXT_direct_state_access is required but not supported by your GPU " ) ;
}
2017-06-19 12:47:38 +02:00
if ( ! gl_caps . ARB_depth_buffer_float_supported & & g_cfg . video . force_high_precision_z_buffer )
{
2020-02-01 09:07:25 +01:00
rsx_log . warning ( " High precision Z buffer requested but your GPU does not support GL_ARB_depth_buffer_float. Option ignored. " ) ;
2017-06-19 12:47:38 +02:00
}
2017-06-22 20:25:58 +02:00
if ( ! gl_caps . ARB_texture_barrier_supported & & ! gl_caps . NV_texture_barrier_supported & & ! g_cfg . video . strict_rendering_mode )
{
2020-02-01 09:07:25 +01:00
rsx_log . warning ( " Texture barriers are not supported by your GPU. Feedback loops will have undefined results. " ) ;
2017-06-22 20:25:58 +02:00
}
2017-04-04 18:14:36 +02:00
//Use industry standard resource alignment values as defaults
m_uniform_buffer_offset_align = 256 ;
m_min_texbuffer_alignment = 256 ;
2018-01-21 16:31:35 +01:00
m_max_texbuffer_size = 0 ;
2017-04-04 18:14:36 +02:00
2015-10-11 22:00:51 +02:00
glEnable ( GL_VERTEX_PROGRAM_POINT_SIZE ) ;
2016-06-20 23:38:38 +02:00
glGetIntegerv ( GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT , & m_uniform_buffer_offset_align ) ;
2016-06-12 17:54:15 +02:00
glGetIntegerv ( GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT , & m_min_texbuffer_alignment ) ;
2018-01-21 16:31:35 +01:00
glGetIntegerv ( GL_MAX_TEXTURE_BUFFER_SIZE , & m_max_texbuffer_size ) ;
2015-10-14 00:45:18 +02:00
m_vao . create ( ) ;
2016-01-28 18:01:10 +01:00
2017-03-29 11:29:11 +02:00
//Set min alignment to 16-bytes for SSE optimizations with aligned addresses to work
m_min_texbuffer_alignment = std : : max ( m_min_texbuffer_alignment , 16 ) ;
m_uniform_buffer_offset_align = std : : max ( m_uniform_buffer_offset_align , 16 ) ;
2020-02-01 09:07:25 +01:00
rsx_log . notice ( " Supported texel buffer size reported: %d bytes " , m_max_texbuffer_size ) ;
2018-01-21 16:31:35 +01:00
if ( m_max_texbuffer_size < ( 16 * 0x100000 ) )
{
2020-02-01 09:07:25 +01:00
rsx_log . error ( " Max texture buffer size supported is less than 16M which is useless. Expect undefined behaviour. " ) ;
2018-01-21 16:31:35 +01:00
m_max_texbuffer_size = ( 16 * 0x100000 ) ;
}
2017-07-31 13:38:28 +02:00
//Array stream buffer
2016-01-28 18:01:10 +01:00
{
2018-04-07 12:19:49 +02:00
m_gl_persistent_stream_buffer = std : : make_unique < gl : : texture > ( GL_TEXTURE_BUFFER , 0 , 0 , 0 , 0 , GL_R8UI ) ;
2018-11-24 13:54:46 +01:00
_SelectTexture ( GL_STREAM_BUFFER_START + 0 ) ;
2018-04-07 12:19:49 +02:00
glBindTexture ( GL_TEXTURE_BUFFER , m_gl_persistent_stream_buffer - > id ( ) ) ;
2017-07-31 13:38:28 +02:00
}
2016-10-18 09:57:28 +02:00
2017-07-31 13:38:28 +02:00
//Register stream buffer
{
2018-04-07 12:19:49 +02:00
m_gl_volatile_stream_buffer = std : : make_unique < gl : : texture > ( GL_TEXTURE_BUFFER , 0 , 0 , 0 , 0 , GL_R8UI ) ;
2018-11-24 13:54:46 +01:00
_SelectTexture ( GL_STREAM_BUFFER_START + 1 ) ;
2018-04-07 12:19:49 +02:00
glBindTexture ( GL_TEXTURE_BUFFER , m_gl_volatile_stream_buffer - > id ( ) ) ;
2016-01-28 18:01:10 +01:00
}
2016-02-15 10:50:14 +01:00
2018-02-02 13:25:15 +01:00
//Fallback null texture instead of relying on texture0
{
2018-11-24 13:54:46 +01:00
std : : vector < u32 > pixeldata = { 0 , 0 , 0 , 0 } ;
2018-02-02 13:25:15 +01:00
//1D
2018-04-07 12:19:49 +02:00
auto tex1D = std : : make_unique < gl : : texture > ( GL_TEXTURE_1D , 1 , 1 , 1 , 1 , GL_RGBA8 ) ;
2019-10-12 00:05:05 +02:00
tex1D - > copy_from ( pixeldata . data ( ) , gl : : texture : : format : : rgba , gl : : texture : : type : : uint_8_8_8_8 , { } ) ;
2018-02-02 13:25:15 +01:00
//2D
2018-04-07 12:19:49 +02:00
auto tex2D = std : : make_unique < gl : : texture > ( GL_TEXTURE_2D , 1 , 1 , 1 , 1 , GL_RGBA8 ) ;
2019-10-12 00:05:05 +02:00
tex2D - > copy_from ( pixeldata . data ( ) , gl : : texture : : format : : rgba , gl : : texture : : type : : uint_8_8_8_8 , { } ) ;
2018-02-02 13:25:15 +01:00
//3D
2018-04-07 12:19:49 +02:00
auto tex3D = std : : make_unique < gl : : texture > ( GL_TEXTURE_3D , 1 , 1 , 1 , 1 , GL_RGBA8 ) ;
2019-10-12 00:05:05 +02:00
tex3D - > copy_from ( pixeldata . data ( ) , gl : : texture : : format : : rgba , gl : : texture : : type : : uint_8_8_8_8 , { } ) ;
2018-02-02 13:25:15 +01:00
//CUBE
2018-04-07 12:19:49 +02:00
auto texCUBE = std : : make_unique < gl : : texture > ( GL_TEXTURE_CUBE_MAP , 1 , 1 , 1 , 1 , GL_RGBA8 ) ;
2019-10-12 00:05:05 +02:00
texCUBE - > copy_from ( pixeldata . data ( ) , gl : : texture : : format : : rgba , gl : : texture : : type : : uint_8_8_8_8 , { } ) ;
2018-02-02 13:25:15 +01:00
m_null_textures [ GL_TEXTURE_1D ] = std : : move ( tex1D ) ;
m_null_textures [ GL_TEXTURE_2D ] = std : : move ( tex2D ) ;
m_null_textures [ GL_TEXTURE_3D ] = std : : move ( tex3D ) ;
m_null_textures [ GL_TEXTURE_CUBE_MAP ] = std : : move ( texCUBE ) ;
}
2017-04-04 18:14:36 +02:00
if ( ! gl_caps . ARB_buffer_storage_supported )
{
2020-02-01 09:07:25 +01:00
rsx_log . warning ( " Forcing use of legacy OpenGL buffers because ARB_buffer_storage is not supported " ) ;
2017-05-20 13:45:02 +02:00
// TODO: do not modify config options
g_cfg . video . gl_legacy_buffers . from_string ( " true " ) ;
2017-04-04 18:14:36 +02:00
}
2017-05-20 13:45:02 +02:00
if ( g_cfg . video . gl_legacy_buffers )
2016-10-18 09:57:28 +02:00
{
2020-02-01 09:07:25 +01:00
rsx_log . warning ( " Using legacy openGL buffers. " ) ;
2016-10-18 09:57:28 +02:00
manually_flush_ring_buffers = true ;
2016-06-12 11:05:22 +02:00
2019-06-08 08:47:51 +02:00
m_attrib_ring_buffer = std : : make_unique < gl : : legacy_ring_buffer > ( ) ;
m_transform_constants_buffer = std : : make_unique < gl : : legacy_ring_buffer > ( ) ;
m_fragment_constants_buffer = std : : make_unique < gl : : legacy_ring_buffer > ( ) ;
m_fragment_env_buffer = std : : make_unique < gl : : legacy_ring_buffer > ( ) ;
m_vertex_env_buffer = std : : make_unique < gl : : legacy_ring_buffer > ( ) ;
m_texture_parameters_buffer = std : : make_unique < gl : : legacy_ring_buffer > ( ) ;
m_vertex_layout_buffer = std : : make_unique < gl : : legacy_ring_buffer > ( ) ;
m_index_ring_buffer = std : : make_unique < gl : : legacy_ring_buffer > ( ) ;
2016-10-18 09:57:28 +02:00
}
else
{
2019-06-08 08:47:51 +02:00
m_attrib_ring_buffer = std : : make_unique < gl : : ring_buffer > ( ) ;
m_transform_constants_buffer = std : : make_unique < gl : : ring_buffer > ( ) ;
m_fragment_constants_buffer = std : : make_unique < gl : : ring_buffer > ( ) ;
m_fragment_env_buffer = std : : make_unique < gl : : ring_buffer > ( ) ;
m_vertex_env_buffer = std : : make_unique < gl : : ring_buffer > ( ) ;
m_texture_parameters_buffer = std : : make_unique < gl : : ring_buffer > ( ) ;
m_vertex_layout_buffer = std : : make_unique < gl : : ring_buffer > ( ) ;
m_index_ring_buffer = std : : make_unique < gl : : ring_buffer > ( ) ;
2016-10-18 09:57:28 +02:00
}
2018-02-22 09:13:01 +01:00
m_attrib_ring_buffer - > create ( gl : : buffer : : target : : texture , 256 * 0x100000 ) ;
m_index_ring_buffer - > create ( gl : : buffer : : target : : element_array , 64 * 0x100000 ) ;
m_transform_constants_buffer - > create ( gl : : buffer : : target : : uniform , 64 * 0x100000 ) ;
m_fragment_constants_buffer - > create ( gl : : buffer : : target : : uniform , 16 * 0x100000 ) ;
2018-10-20 16:43:00 +02:00
m_fragment_env_buffer - > create ( gl : : buffer : : target : : uniform , 16 * 0x100000 ) ;
m_vertex_env_buffer - > create ( gl : : buffer : : target : : uniform , 16 * 0x100000 ) ;
m_texture_parameters_buffer - > create ( gl : : buffer : : target : : uniform , 16 * 0x100000 ) ;
m_vertex_layout_buffer - > create ( gl : : buffer : : target : : uniform , 16 * 0x100000 ) ;
2018-02-22 09:13:01 +01:00
2018-04-29 15:14:53 +02:00
if ( gl_caps . vendor_AMD )
{
2019-06-08 08:47:51 +02:00
m_identity_index_buffer = std : : make_unique < gl : : buffer > ( ) ;
2019-08-20 20:01:27 +02:00
m_identity_index_buffer - > create ( gl : : buffer : : target : : element_array , 1 * 0x100000 , nullptr , gl : : buffer : : memory_type : : host_visible ) ;
2018-04-29 15:14:53 +02:00
// Initialize with 256k identity entries
2019-12-03 23:34:23 +01:00
auto * dst = reinterpret_cast < u32 * > ( m_identity_index_buffer - > map ( gl : : buffer : : access : : write ) ) ;
2018-04-29 15:14:53 +02:00
for ( u32 n = 0 ; n < ( 0x100000 > > 2 ) ; + + n )
{
dst [ n ] = n ;
}
m_identity_index_buffer - > unmap ( ) ;
}
2019-10-13 21:37:10 +02:00
else if ( gl_caps . vendor_NVIDIA )
{
// NOTE: On NVIDIA cards going back decades (including the PS3) there is a slight normalization inaccuracy in compressed formats.
// Confirmed in BLES01916 (The Evil Within) which uses RGB565 for some virtual texturing data.
backend_config . supports_hw_renormalization = true ;
}
2018-04-29 15:14:53 +02:00
2019-12-03 23:34:23 +01:00
m_persistent_stream_view . update ( m_attrib_ring_buffer . get ( ) , 0 , std : : min < u32 > ( static_cast < u32 > ( m_attrib_ring_buffer - > size ( ) ) , m_max_texbuffer_size ) ) ;
m_volatile_stream_view . update ( m_attrib_ring_buffer . get ( ) , 0 , std : : min < u32 > ( static_cast < u32 > ( m_attrib_ring_buffer - > size ( ) ) , m_max_texbuffer_size ) ) ;
2018-04-07 12:19:49 +02:00
m_gl_persistent_stream_buffer - > copy_from ( m_persistent_stream_view ) ;
m_gl_volatile_stream_buffer - > copy_from ( m_volatile_stream_view ) ;
2016-10-18 09:57:28 +02:00
m_vao . element_array_buffer = * m_index_ring_buffer ;
2016-10-20 05:20:45 +02:00
2017-05-20 13:45:02 +02:00
if ( g_cfg . video . overlay )
2017-04-04 18:14:36 +02:00
{
if ( gl_caps . ARB_shader_draw_parameters_supported )
{
m_text_printer . init ( ) ;
m_text_printer . set_enabled ( true ) ;
}
}
2017-02-16 19:29:56 +01:00
2018-11-24 13:54:46 +01:00
int image_unit = 0 ;
for ( auto & sampler : m_fs_sampler_states )
2017-03-29 21:27:29 +02:00
{
2018-11-24 13:54:46 +01:00
sampler . create ( ) ;
sampler . bind ( image_unit + + ) ;
2018-07-09 20:31:31 +02:00
}
2018-11-24 13:54:46 +01:00
for ( auto & sampler : m_fs_sampler_mirror_states )
{
sampler . create ( ) ;
sampler . apply_defaults ( ) ;
sampler . bind ( image_unit + + ) ;
}
for ( auto & sampler : m_vs_sampler_states )
2018-07-09 20:31:31 +02:00
{
2018-11-24 13:54:46 +01:00
sampler . create ( ) ;
sampler . bind ( image_unit + + ) ;
2017-03-29 21:27:29 +02:00
}
2017-07-27 18:04:55 +02:00
//Occlusion query
for ( u32 i = 0 ; i < occlusion_query_count ; + + i )
{
2017-11-16 22:52:21 +01:00
GLuint handle = 0 ;
2018-03-05 12:09:43 +01:00
auto & query = m_occlusion_query_data [ i ] ;
2017-11-16 22:52:21 +01:00
glGenQueries ( 1 , & handle ) ;
2018-09-03 21:28:33 +02:00
2019-12-03 23:34:23 +01:00
query . driver_handle = handle ;
2017-07-27 18:04:55 +02:00
query . pending = false ;
query . active = false ;
query . result = 0 ;
}
2017-05-11 00:42:55 +02:00
//Clip planes are shader controlled; enable all planes driver-side
glEnable ( GL_CLIP_DISTANCE0 + 0 ) ;
glEnable ( GL_CLIP_DISTANCE0 + 1 ) ;
glEnable ( GL_CLIP_DISTANCE0 + 2 ) ;
glEnable ( GL_CLIP_DISTANCE0 + 3 ) ;
glEnable ( GL_CLIP_DISTANCE0 + 4 ) ;
glEnable ( GL_CLIP_DISTANCE0 + 5 ) ;
2017-11-15 13:02:59 +01:00
m_depth_converter . create ( ) ;
2018-01-17 17:14:00 +01:00
m_ui_renderer . create ( ) ;
2018-03-23 12:49:15 +01:00
m_video_output_pass . create ( ) ;
2017-11-15 13:02:59 +01:00
2017-09-08 16:52:13 +02:00
m_gl_texture_cache . initialize ( ) ;
2017-08-10 21:40:20 +02:00
2020-03-09 10:15:59 +01:00
m_prog_buffer . initialize
(
[ this ] ( void * const & props , const RSXVertexProgram & vp , const RSXFragmentProgram & fp )
{
// Program was linked or queued for linking
m_shaders_cache - > store ( props , vp , fp ) ;
}
) ;
2019-08-26 02:08:16 +02:00
if ( ! m_overlay_manager )
2018-01-17 17:14:00 +01:00
{
m_frame - > hide ( ) ;
m_shaders_cache - > load ( nullptr ) ;
m_frame - > show ( ) ;
}
else
{
2020-01-03 12:04:18 +01:00
rsx : : shader_loading_dialog_native dlg ( this ) ;
2017-10-28 14:34:24 +02:00
2020-01-03 12:04:18 +01:00
m_shaders_cache - > load ( & dlg ) ;
2018-01-17 17:14:00 +01:00
}
2015-10-09 20:04:20 +02:00
}
2018-01-17 17:14:00 +01:00
2015-11-26 09:06:29 +01:00
void GLGSRender : : on_exit ( )
2015-10-09 20:04:20 +02:00
{
2019-10-12 00:05:05 +02:00
// Globals
// TODO: Move these
2019-10-02 02:47:19 +02:00
gl : : destroy_compute_tasks ( ) ;
2019-10-12 00:05:05 +02:00
if ( gl : : g_typeless_transfer_buffer )
{
gl : : g_typeless_transfer_buffer . remove ( ) ;
}
2018-03-05 12:09:43 +01:00
zcull_ctrl . release ( ) ;
2016-08-26 16:23:23 +02:00
m_prog_buffer . clear ( ) ;
2019-05-10 22:04:13 +02:00
m_rtts . destroy ( ) ;
2015-10-04 00:45:26 +02:00
2018-08-08 23:48:56 +02:00
for ( auto & fbo : m_framebuffer_cache )
2016-06-26 14:47:48 +02:00
{
2018-08-08 23:48:56 +02:00
fbo . remove ( ) ;
2016-06-26 14:47:48 +02:00
}
2015-10-04 00:45:26 +02:00
2018-08-08 23:48:56 +02:00
m_framebuffer_cache . clear ( ) ;
2015-10-11 22:00:51 +02:00
if ( m_flip_fbo )
2016-06-26 14:47:48 +02:00
{
2015-10-11 22:00:51 +02:00
m_flip_fbo . remove ( ) ;
2016-06-26 14:47:48 +02:00
}
2015-10-04 00:45:26 +02:00
2015-10-11 22:00:51 +02:00
if ( m_flip_tex_color )
2016-06-26 14:47:48 +02:00
{
2018-04-07 12:19:49 +02:00
m_flip_tex_color . reset ( ) ;
2016-06-26 14:47:48 +02:00
}
2015-10-14 00:45:18 +02:00
if ( m_vao )
2016-06-26 14:47:48 +02:00
{
2015-10-14 00:45:18 +02:00
m_vao . remove ( ) ;
2016-06-26 14:47:48 +02:00
}
2015-10-14 02:15:23 +02:00
2018-04-07 12:19:49 +02:00
m_gl_persistent_stream_buffer . reset ( ) ;
m_gl_volatile_stream_buffer . reset ( ) ;
2016-06-11 21:51:34 +02:00
2018-07-09 20:31:31 +02:00
for ( auto & sampler : m_fs_sampler_states )
{
sampler . remove ( ) ;
}
2018-11-24 13:54:46 +01:00
for ( auto & sampler : m_fs_sampler_mirror_states )
{
sampler . remove ( ) ;
}
2018-07-09 20:31:31 +02:00
for ( auto & sampler : m_vs_sampler_states )
2017-03-29 21:27:29 +02:00
{
sampler . remove ( ) ;
}
2017-04-17 23:30:34 +02:00
if ( m_attrib_ring_buffer )
{
m_attrib_ring_buffer - > remove ( ) ;
}
if ( m_transform_constants_buffer )
{
m_transform_constants_buffer - > remove ( ) ;
}
if ( m_fragment_constants_buffer )
{
m_fragment_constants_buffer - > remove ( ) ;
}
2018-10-20 16:43:00 +02:00
if ( m_fragment_env_buffer )
2017-04-17 23:30:34 +02:00
{
2018-10-20 16:43:00 +02:00
m_fragment_env_buffer - > remove ( ) ;
}
if ( m_vertex_env_buffer )
{
m_vertex_env_buffer - > remove ( ) ;
}
if ( m_texture_parameters_buffer )
{
m_texture_parameters_buffer - > remove ( ) ;
}
if ( m_vertex_layout_buffer )
{
m_vertex_layout_buffer - > remove ( ) ;
2017-04-17 23:30:34 +02:00
}
if ( m_index_ring_buffer )
{
m_index_ring_buffer - > remove ( ) ;
}
2016-07-20 14:16:19 +02:00
2018-04-29 15:14:53 +02:00
if ( m_identity_index_buffer )
{
m_identity_index_buffer - > remove ( ) ;
}
2018-04-07 12:19:49 +02:00
m_null_textures . clear ( ) ;
2016-10-11 02:55:42 +02:00
m_text_printer . close ( ) ;
2017-09-08 16:52:13 +02:00
m_gl_texture_cache . destroy ( ) ;
2017-11-15 13:02:59 +01:00
m_depth_converter . destroy ( ) ;
2018-01-17 17:14:00 +01:00
m_ui_renderer . destroy ( ) ;
2018-03-23 12:49:15 +01:00
m_video_output_pass . destroy ( ) ;
2016-10-11 02:55:42 +02:00
2017-07-27 18:04:55 +02:00
for ( u32 i = 0 ; i < occlusion_query_count ; + + i )
{
2018-03-05 12:09:43 +01:00
auto & query = m_occlusion_query_data [ i ] ;
2017-07-27 18:04:55 +02:00
query . active = false ;
query . pending = false ;
2019-12-03 23:34:23 +01:00
GLuint handle = query . driver_handle ;
2017-11-16 22:52:21 +01:00
glDeleteQueries ( 1 , & handle ) ;
query . driver_handle = 0 ;
2017-07-27 18:04:55 +02:00
}
2017-11-04 15:30:14 +01:00
glFlush ( ) ;
glFinish ( ) ;
2017-11-03 18:43:11 +01:00
GSRender : : on_exit ( ) ;
2012-11-15 00:39:56 +01:00
}
2017-02-16 19:29:56 +01:00
void GLGSRender : : clear_surface ( u32 arg )
2013-11-09 22:29:49 +01:00
{
2019-12-04 13:07:20 +01:00
if ( skip_current_frame ) return ;
2019-04-29 17:56:35 +02:00
// If stencil write mask is disabled, remove clear_stencil bit
if ( ! rsx : : method_registers . stencil_mask ( ) ) arg & = ~ 0x2u ;
// Ignore invalid clear flags
2017-06-30 00:20:23 +02:00
if ( ( arg & 0xf3 ) = = 0 ) return ;
2014-08-18 16:37:23 +02:00
2019-12-04 13:07:20 +01:00
u8 ctx = rsx : : framebuffer_creation_context : : context_draw ;
if ( arg & 0xF0 ) ctx | = rsx : : framebuffer_creation_context : : context_clear_color ;
if ( arg & 0x3 ) ctx | = rsx : : framebuffer_creation_context : : context_clear_depth ;
2019-12-15 11:38:42 +01:00
init_buffers ( static_cast < rsx : : framebuffer_creation_context > ( ctx ) , true ) ;
2019-12-04 13:07:20 +01:00
if ( ! framebuffer_status_valid ) return ;
2015-10-11 22:00:51 +02:00
GLbitfield mask = 0 ;
2019-05-14 18:50:45 +02:00
gl : : command_context cmd { gl_state } ;
const bool require_mem_load =
rsx : : method_registers . scissor_origin_x ( ) > 0 | |
rsx : : method_registers . scissor_origin_y ( ) > 0 | |
rsx : : method_registers . scissor_width ( ) < rsx : : method_registers . surface_clip_width ( ) | |
rsx : : method_registers . scissor_height ( ) < rsx : : method_registers . surface_clip_height ( ) ;
2019-08-27 13:55:45 +02:00
bool update_color = false , update_z = false ;
2016-06-26 23:37:02 +02:00
rsx : : surface_depth_format surface_depth_format = rsx : : method_registers . surface_depth_fmt ( ) ;
2016-06-20 23:38:38 +02:00
2018-12-30 21:47:15 +01:00
if ( auto ds = std : : get < 1 > ( m_rtts . m_bound_depth_stencil ) ; arg & 0x3 )
2015-10-09 20:04:20 +02:00
{
2018-12-30 21:47:15 +01:00
if ( arg & 0x1 )
{
u32 max_depth_value = get_max_depth_value ( surface_depth_format ) ;
u32 clear_depth = rsx : : method_registers . z_clear_value ( surface_depth_format = = rsx : : surface_depth_format : : z24s8 ) ;
2014-02-16 09:56:58 +01:00
2018-12-30 21:47:15 +01:00
gl_state . depth_mask ( GL_TRUE ) ;
gl_state . clear_depth ( f32 ( clear_depth ) / max_depth_value ) ;
mask | = GLenum ( gl : : buffers : : depth ) ;
}
2017-02-16 19:29:56 +01:00
2018-12-30 21:47:15 +01:00
if ( surface_depth_format = = rsx : : surface_depth_format : : z24s8 )
2017-06-30 23:24:41 +02:00
{
2018-12-30 21:47:15 +01:00
if ( arg & 0x2 )
{
u8 clear_stencil = rsx : : method_registers . stencil_clear_value ( ) ;
2014-02-16 09:56:58 +01:00
2018-12-30 21:47:15 +01:00
gl_state . stencil_mask ( rsx : : method_registers . stencil_mask ( ) ) ;
gl_state . clear_stencil ( clear_stencil ) ;
mask | = GLenum ( gl : : buffers : : stencil ) ;
}
2014-02-16 09:56:58 +01:00
2019-05-14 18:50:45 +02:00
if ( ( arg & 0x3 ) ! = 0x3 & & ! require_mem_load & & ds - > dirty ( ) )
2018-12-30 21:47:15 +01:00
{
verify ( HERE ) , mask ;
2015-10-04 00:45:26 +02:00
2019-06-12 20:02:51 +02:00
// Only one aspect was cleared. Make sure to memory initialize the other before removing dirty flag
2018-12-30 21:47:15 +01:00
if ( arg = = 1 )
{
// Depth was cleared, initialize stencil
gl_state . stencil_mask ( 0xFF ) ;
gl_state . clear_stencil ( 0xFF ) ;
mask | = GLenum ( gl : : buffers : : stencil ) ;
}
else
{
// Stencil was cleared, initialize depth
gl_state . depth_mask ( GL_TRUE ) ;
gl_state . clear_depth ( 1.f ) ;
mask | = GLenum ( gl : : buffers : : depth ) ;
}
}
}
if ( mask )
{
2019-05-14 18:50:45 +02:00
if ( require_mem_load ) ds - > write_barrier ( cmd ) ;
2018-12-30 21:47:15 +01:00
// Memory has been initialized
2019-08-27 13:55:45 +02:00
update_z = true ;
2018-12-30 21:47:15 +01:00
}
2015-10-11 22:00:51 +02:00
}
2018-04-01 15:41:57 +02:00
if ( auto colormask = ( arg & 0xf0 ) )
2015-10-11 22:00:51 +02:00
{
2018-04-01 15:41:57 +02:00
switch ( rsx : : method_registers . surface_color ( ) )
{
case rsx : : surface_color_format : : x32 :
case rsx : : surface_color_format : : w16z16y16x16 :
case rsx : : surface_color_format : : w32z32y32x32 :
{
2018-04-12 13:13:13 +02:00
//Nop
2018-04-01 15:41:57 +02:00
break ;
}
2018-04-12 13:13:13 +02:00
case rsx : : surface_color_format : : g8b8 :
{
colormask = rsx : : get_g8b8_r8g8_colormask ( colormask ) ;
2018-09-06 13:28:12 +02:00
[[fallthrough]] ;
2018-04-12 13:13:13 +02:00
}
2018-04-01 15:41:57 +02:00
default :
{
u8 clear_a = rsx : : method_registers . clear_color_a ( ) ;
u8 clear_r = rsx : : method_registers . clear_color_r ( ) ;
u8 clear_g = rsx : : method_registers . clear_color_g ( ) ;
u8 clear_b = rsx : : method_registers . clear_color_b ( ) ;
2015-10-04 00:45:26 +02:00
2018-04-01 15:41:57 +02:00
gl_state . clear_color ( clear_r , clear_g , clear_b , clear_a ) ;
mask | = GLenum ( gl : : buffers : : color ) ;
2017-06-30 23:24:41 +02:00
2019-08-27 13:55:45 +02:00
for ( u8 index = m_rtts . m_bound_render_targets_config . first , count = 0 ;
count < m_rtts . m_bound_render_targets_config . second ;
+ + count , + + index )
2017-06-30 23:24:41 +02:00
{
2019-08-27 13:55:45 +02:00
if ( require_mem_load ) m_rtts . m_bound_render_targets [ index ] . second - > write_barrier ( cmd ) ;
gl_state . color_maski ( count , colormask ) ;
2017-06-30 23:24:41 +02:00
}
2018-04-01 15:41:57 +02:00
2019-08-27 13:55:45 +02:00
update_color = true ;
2018-04-01 15:41:57 +02:00
break ;
}
2017-06-30 23:24:41 +02:00
}
2015-10-11 22:00:51 +02:00
}
2015-10-04 00:45:26 +02:00
2019-08-27 13:55:45 +02:00
if ( update_color | | update_z )
{
const bool write_all_mask [ ] = { true , true , true , true } ;
m_rtts . on_write ( update_color ? write_all_mask : nullptr , update_z ) ;
}
2016-01-06 00:15:35 +01:00
glClear ( mask ) ;
2013-11-09 22:29:49 +01:00
}
2013-08-26 16:18:59 +02:00
2018-07-11 22:51:29 +02:00
bool GLGSRender : : load_program ( )
2017-07-31 13:38:28 +02:00
{
2018-04-20 22:44:34 +02:00
if ( m_graphics_state & rsx : : pipeline_state : : invalidate_pipeline_bits )
2018-04-10 17:06:29 +02:00
{
get_current_fragment_program ( fs_sampler_state ) ;
verify ( HERE ) , current_fragment_program . valid ;
2015-10-04 00:45:26 +02:00
2018-07-09 20:31:31 +02:00
get_current_vertex_program ( vs_sampler_state ) ;
2017-08-10 21:40:20 +02:00
2018-04-10 17:06:29 +02:00
current_vertex_program . skip_vertex_input_check = true ; //not needed for us since decoding is done server side
current_fragment_program . unnormalized_coords = 0 ; //unused
2018-07-11 22:51:29 +02:00
}
else if ( m_program )
{
// Program already loaded
return true ;
}
2017-11-02 16:54:57 +01:00
2018-07-11 22:51:29 +02:00
void * pipeline_properties = nullptr ;
m_program = m_prog_buffer . get_graphics_pipeline ( current_vertex_program , current_fragment_program , pipeline_properties ,
2020-03-09 10:15:59 +01:00
! g_cfg . video . disable_asynchronous_shader_compiler , true ) . get ( ) ;
2017-11-02 16:54:57 +01:00
2018-07-11 22:51:29 +02:00
if ( m_prog_buffer . check_cache_missed ( ) )
{
// Notify the user with HUD notification
if ( g_cfg . misc . show_shader_compilation_hint )
{
if ( m_overlay_manager )
2018-03-20 02:00:49 +01:00
{
2018-07-11 22:51:29 +02:00
if ( auto dlg = m_overlay_manager - > get < rsx : : overlays : : shader_compile_notification > ( ) )
2018-04-10 17:06:29 +02:00
{
2018-07-11 22:51:29 +02:00
// Extend duration
dlg - > touch ( ) ;
}
else
{
// Create dialog but do not show immediately
m_overlay_manager - > create < rsx : : overlays : : shader_compile_notification > ( ) ;
2018-04-10 17:06:29 +02:00
}
2018-03-20 02:00:49 +01:00
}
2018-01-17 17:14:00 +01:00
}
}
2020-04-05 13:16:57 +02:00
else
{
verify ( HERE ) , m_program ;
m_program - > sync ( ) ;
}
2018-01-17 17:14:00 +01:00
2018-07-11 22:51:29 +02:00
return m_program ! = nullptr ;
}
2018-10-28 16:58:42 +01:00
void GLGSRender : : load_program_env ( )
2018-07-11 22:51:29 +02:00
{
if ( ! m_program )
{
fmt : : throw_exception ( " Unreachable right now " HERE ) ;
}
2018-10-20 16:43:00 +02:00
const u32 fragment_constants_size = current_fp_metadata . program_constants_buffer_length ;
const bool update_transform_constants = ! ! ( m_graphics_state & rsx : : pipeline_state : : transform_constants_dirty ) ;
const bool update_fragment_constants = ! ! ( m_graphics_state & rsx : : pipeline_state : : fragment_constants_dirty ) & & fragment_constants_size ;
const bool update_vertex_env = ! ! ( m_graphics_state & rsx : : pipeline_state : : vertex_state_dirty ) ;
const bool update_fragment_env = ! ! ( m_graphics_state & rsx : : pipeline_state : : fragment_state_dirty ) ;
const bool update_fragment_texture_env = ! ! ( m_graphics_state & rsx : : pipeline_state : : fragment_texture_state_dirty ) ;
2018-07-11 22:51:29 +02:00
m_program - > use ( ) ;
2017-03-11 10:07:26 +01:00
if ( manually_flush_ring_buffers )
{
2018-10-20 16:43:00 +02:00
if ( update_fragment_env ) m_fragment_env_buffer - > reserve_storage_on_heap ( 128 ) ;
if ( update_vertex_env ) m_vertex_env_buffer - > reserve_storage_on_heap ( 256 ) ;
if ( update_fragment_texture_env ) m_texture_parameters_buffer - > reserve_storage_on_heap ( 256 ) ;
2019-08-25 14:04:20 +02:00
if ( update_fragment_constants ) m_fragment_constants_buffer - > reserve_storage_on_heap ( align ( fragment_constants_size , 256 ) ) ;
2018-04-20 22:44:34 +02:00
if ( update_transform_constants ) m_transform_constants_buffer - > reserve_storage_on_heap ( 8192 ) ;
2017-03-11 10:07:26 +01:00
}
2018-10-20 16:43:00 +02:00
if ( update_vertex_env )
{
// Vertex state
2018-10-31 22:25:59 +01:00
auto mapping = m_vertex_env_buffer - > alloc_from_heap ( 144 , m_uniform_buffer_offset_align ) ;
2018-10-20 16:43:00 +02:00
auto buf = static_cast < u8 * > ( mapping . first ) ;
fill_scale_offset_data ( buf , false ) ;
fill_user_clip_data ( buf + 64 ) ;
* ( reinterpret_cast < u32 * > ( buf + 128 ) ) = rsx : : method_registers . transform_branch_bits ( ) ;
2018-10-31 22:25:59 +01:00
* ( reinterpret_cast < f32 * > ( buf + 132 ) ) = rsx : : method_registers . point_size ( ) ;
* ( reinterpret_cast < f32 * > ( buf + 136 ) ) = rsx : : method_registers . clip_min ( ) ;
* ( reinterpret_cast < f32 * > ( buf + 140 ) ) = rsx : : method_registers . clip_max ( ) ;
2018-10-20 16:43:00 +02:00
2019-10-02 01:26:29 +02:00
m_vertex_env_buffer - > bind_range ( GL_VERTEX_PARAMS_BIND_SLOT , mapping . second , 144 ) ;
2018-10-20 16:43:00 +02:00
}
2018-04-20 22:44:34 +02:00
if ( update_transform_constants )
2017-03-11 10:07:26 +01:00
{
// Vertex constants
2018-10-20 16:43:00 +02:00
auto mapping = m_transform_constants_buffer - > alloc_from_heap ( 8192 , m_uniform_buffer_offset_align ) ;
auto buf = static_cast < u8 * > ( mapping . first ) ;
2017-03-11 10:07:26 +01:00
fill_vertex_program_constants_data ( buf ) ;
2018-10-20 16:43:00 +02:00
2019-10-02 01:26:29 +02:00
m_transform_constants_buffer - > bind_range ( GL_VERTEX_CONSTANT_BUFFERS_BIND_SLOT , mapping . second , 8192 ) ;
2017-03-11 10:07:26 +01:00
}
2016-08-26 16:23:23 +02:00
2018-10-20 16:43:00 +02:00
if ( update_fragment_constants )
2018-02-23 09:30:13 +01:00
{
2018-10-20 16:43:00 +02:00
// Fragment constants
auto mapping = m_fragment_constants_buffer - > alloc_from_heap ( fragment_constants_size , m_uniform_buffer_offset_align ) ;
auto buf = static_cast < u8 * > ( mapping . first ) ;
2019-11-09 16:51:53 +01:00
m_prog_buffer . fill_fragment_constants_buffer ( { reinterpret_cast < float * > ( buf ) , fragment_constants_size } ,
2018-04-10 17:06:29 +02:00
current_fragment_program , gl : : get_driver_caps ( ) . vendor_NVIDIA ) ;
2018-10-20 16:43:00 +02:00
2019-10-02 01:26:29 +02:00
m_fragment_constants_buffer - > bind_range ( GL_FRAGMENT_CONSTANT_BUFFERS_BIND_SLOT , mapping . second , fragment_constants_size ) ;
2018-02-23 09:30:13 +01:00
}
2017-04-04 18:14:36 +02:00
2018-10-20 16:43:00 +02:00
if ( update_fragment_env )
{
// Fragment state
auto mapping = m_fragment_env_buffer - > alloc_from_heap ( 32 , m_uniform_buffer_offset_align ) ;
auto buf = static_cast < u8 * > ( mapping . first ) ;
fill_fragment_state_buffer ( buf , current_fragment_program ) ;
2019-10-02 01:26:29 +02:00
m_fragment_env_buffer - > bind_range ( GL_FRAGMENT_STATE_BIND_SLOT , mapping . second , 32 ) ;
2018-10-20 16:43:00 +02:00
}
2016-06-12 11:05:22 +02:00
2018-10-20 16:43:00 +02:00
if ( update_fragment_texture_env )
{
// Fragment texture parameters
auto mapping = m_texture_parameters_buffer - > alloc_from_heap ( 256 , m_uniform_buffer_offset_align ) ;
auto buf = static_cast < u8 * > ( mapping . first ) ;
fill_fragment_texture_parameters ( buf , current_fragment_program ) ;
2017-03-11 10:07:26 +01:00
2019-10-02 01:26:29 +02:00
m_texture_parameters_buffer - > bind_range ( GL_FRAGMENT_TEXTURE_PARAMS_BIND_SLOT , mapping . second , 256 ) ;
2018-10-20 16:43:00 +02:00
}
2016-06-27 00:52:08 +02:00
2016-10-18 09:57:28 +02:00
if ( manually_flush_ring_buffers )
2017-03-11 10:07:26 +01:00
{
2018-10-20 16:43:00 +02:00
if ( update_fragment_env ) m_fragment_env_buffer - > unmap ( ) ;
if ( update_vertex_env ) m_vertex_env_buffer - > unmap ( ) ;
if ( update_fragment_texture_env ) m_texture_parameters_buffer - > unmap ( ) ;
if ( update_fragment_constants ) m_fragment_constants_buffer - > unmap ( ) ;
2018-04-20 22:44:34 +02:00
if ( update_transform_constants ) m_transform_constants_buffer - > unmap ( ) ;
2017-03-11 10:07:26 +01:00
}
2016-10-18 09:57:28 +02:00
2018-10-20 16:43:00 +02:00
const u32 handled_flags = ( rsx : : pipeline_state : : fragment_state_dirty | rsx : : pipeline_state : : vertex_state_dirty | rsx : : pipeline_state : : transform_constants_dirty | rsx : : pipeline_state : : fragment_constants_dirty | rsx : : pipeline_state : : fragment_texture_state_dirty ) ;
2018-07-11 22:51:29 +02:00
m_graphics_state & = ~ handled_flags ;
2017-11-01 14:38:37 +01:00
}
2018-10-28 16:58:42 +01:00
void GLGSRender : : update_vertex_env ( const gl : : vertex_upload_info & upload_info )
{
if ( manually_flush_ring_buffers )
{
m_vertex_layout_buffer - > reserve_storage_on_heap ( 128 + 16 ) ;
}
// Vertex layout state
auto mapping = m_vertex_layout_buffer - > alloc_from_heap ( 128 + 16 , m_uniform_buffer_offset_align ) ;
2019-01-14 13:33:05 +01:00
auto buf = static_cast < u32 * > ( mapping . first ) ;
buf [ 0 ] = upload_info . vertex_index_base ;
buf [ 1 ] = upload_info . vertex_index_offset ;
2018-10-28 16:58:42 +01:00
buf + = 4 ;
2019-01-14 13:33:05 +01:00
2019-12-03 23:34:23 +01:00
fill_vertex_layout_state ( m_vertex_layout , upload_info . first_vertex , upload_info . allocated_vertex_count , reinterpret_cast < s32 * > ( buf ) , upload_info . persistent_mapping_offset , upload_info . volatile_mapping_offset ) ;
2018-10-28 16:58:42 +01:00
2019-10-02 01:26:29 +02:00
m_vertex_layout_buffer - > bind_range ( GL_VERTEX_LAYOUT_BIND_SLOT , mapping . second , 128 + 16 ) ;
2018-10-28 16:58:42 +01:00
if ( manually_flush_ring_buffers )
{
m_vertex_layout_buffer - > unmap ( ) ;
}
}
2016-02-15 10:50:14 +01:00
// Access-violation hook for the texture cache. Returns true when the fault was
// handled by invalidating (and possibly flushing) cached GPU sections covering
// `address`; false when the address is not tracked and the fault must be
// handled elsewhere.
bool GLGSRender::on_access_violation(u32 address, bool is_writing)
{
	// Only the RSX thread owns the GL context, so a flush can only be executed
	// directly when the fault happened on that thread; otherwise it is deferred.
	const bool can_flush = (std::this_thread::get_id() == m_rsx_thread);

	const rsx::invalidation_cause cause =
		is_writing ? (can_flush ? rsx::invalidation_cause::write : rsx::invalidation_cause::deferred_write)
		           : (can_flush ? rsx::invalidation_cause::read : rsx::invalidation_cause::deferred_read);

	// A default-constructed command context is passed when we cannot touch GL state
	auto cmd = can_flush ? gl::command_context{ gl_state } : gl::command_context{};
	auto result = m_gl_texture_cache.invalidate_address(cmd, address, cause);

	if (!result.violation_handled)
		return false;

	{
		// Cached sections changed; sampler descriptors must be revalidated next draw
		std::lock_guard lock(m_sampler_mutex);
		m_samplers_dirty.store(true);
	}

	if (result.num_flushable > 0)
	{
		// Fault occurred off the RSX thread with data that must be written back:
		// queue the flush for the RSX thread and block until it completes.
		auto& task = post_flush_request(address, result);

		// Release the vm lock before sleeping so the RSX thread can make progress
		vm::temporary_unlock();

		task.producer_wait();
	}

	return true;
}
2017-02-16 19:29:56 +01:00
2019-08-25 17:47:49 +02:00
void GLGSRender : : on_invalidate_memory_range ( const utils : : address_range & range , rsx : : invalidation_cause cause )
2017-08-07 23:54:40 +02:00
{
2018-12-29 14:28:12 +01:00
gl : : command_context cmd { gl_state } ;
2019-08-25 17:47:49 +02:00
auto data = std : : move ( m_gl_texture_cache . invalidate_range ( cmd , range , cause ) ) ;
2018-09-22 02:14:26 +02:00
AUDIT ( data . empty ( ) ) ;
2019-08-25 17:47:49 +02:00
if ( cause = = rsx : : invalidation_cause : : unmap & & data . violation_handled )
2017-10-30 13:27:22 +01:00
{
2018-09-22 02:14:26 +02:00
m_gl_texture_cache . purge_unreleased_sections ( ) ;
2017-10-30 13:27:22 +01:00
{
2018-09-03 21:28:33 +02:00
std : : lock_guard lock ( m_sampler_mutex ) ;
2017-10-30 13:27:22 +01:00
m_samplers_dirty . store ( true ) ;
}
}
2017-08-07 23:54:40 +02:00
}
2019-08-25 17:47:49 +02:00
void GLGSRender : : on_semaphore_acquire_wait ( )
{
2019-08-26 20:41:37 +02:00
if ( ! work_queue . empty ( ) | |
( async_flip_requested & flip_request : : emu_requested ) )
2019-08-25 17:47:49 +02:00
{
do_local_task ( rsx : : FIFO_state : : lock_wait ) ;
}
}
2018-05-29 13:53:16 +02:00
// Services work that must run on the RSX thread: flush requests posted by
// other threads from the access-violation handler, deferred texture-cache
// updates, and native-UI flips. `state` describes why we were called; in
// lock_wait state only the critical queue servicing is performed.
void GLGSRender::do_local_task(rsx::FIFO_state state)
{
	if (!work_queue.empty())
	{
		std::lock_guard lock(queue_guard);

		// Drop entries whose results the waiting producer has already consumed
		work_queue.remove_if([](auto& q) { return q.received; });

		for (auto& q : work_queue)
		{
			if (q.processed) continue;

			gl::command_context cmd{ gl_state };
			q.result = m_gl_texture_cache.flush_all(cmd, q.section_data);
			q.processed = true;
		}
	}
	else if (!in_begin_end && state != rsx::FIFO_state::lock_wait)
	{
		if (m_graphics_state & rsx::pipeline_state::framebuffer_reads_dirty)
		{
			//This will re-engage locks and break the texture cache if another thread is waiting in access violation handler!
			//Only call when there are no waiters
			m_gl_texture_cache.do_update();
			m_graphics_state &= ~rsx::pipeline_state::framebuffer_reads_dirty;
		}
	}

	rsx::thread::do_local_task(state);

	if (state == rsx::FIFO_state::lock_wait)
	{
		// Critical check finished
		return;
	}

	if (m_overlay_manager)
	{
		// Present the native UI overlay even when the guest is not flipping,
		// but never from inside a begin/end bracket
		if (!in_begin_end && async_flip_requested & flip_request::native_ui)
		{
			rsx::display_flip_info_t info{};
			info.buffer = current_display_buffer;
			flip(info);
		}
	}
}
2020-01-17 17:24:33 +01:00
// Queues a section flush for execution on the RSX thread and returns the
// queue entry so the caller can wait on its completion.
gl::work_item& GLGSRender::post_flush_request(u32 address, gl::texture_cache::thrashed_set& flush_data)
{
	std::lock_guard lock(queue_guard);

	auto& entry = work_queue.emplace_back();
	entry.address_to_flush = address;
	entry.section_data = std::move(flush_data);

	return entry;
}
2017-03-29 21:27:29 +02:00
bool GLGSRender : : scaled_image_from_memory ( rsx : : blit_src_info & src , rsx : : blit_dst_info & dst , bool interpolate )
{
2018-12-29 14:28:12 +01:00
gl : : command_context cmd { gl_state } ;
if ( m_gl_texture_cache . blit ( cmd , src , dst , interpolate , m_rtts ) )
2018-06-04 18:57:16 +02:00
{
m_samplers_dirty . store ( true ) ;
return true ;
}
return false ;
2017-03-29 21:27:29 +02:00
}
2017-07-27 18:04:55 +02:00
2017-10-26 05:01:10 +02:00
// Called when a tiled memory region is unbound. Writeback of the tile's
// contents is not implemented yet (the block below is deliberately disabled);
// for now only sampler state is invalidated.
void GLGSRender::notify_tile_unbound(u32 tile)
{
	// TODO: Handle texture writeback
	if (false)
	{
		u32 addr = rsx::get_address(tiles[tile].offset, tiles[tile].location, HERE);
		on_notify_memory_unmapped(addr, tiles[tile].size);
		m_rtts.invalidate_surface_address(addr, false);
	}

	{
		// Surfaces referencing the tile may be stale; revalidate samplers next draw
		std::lock_guard lock(m_sampler_mutex);
		m_samplers_dirty.store(true);
	}
}
2018-03-05 12:09:43 +01:00
void GLGSRender : : begin_occlusion_query ( rsx : : reports : : occlusion_query_info * query )
2017-07-27 18:04:55 +02:00
{
2017-11-16 22:52:21 +01:00
query - > result = 0 ;
2019-12-03 23:34:23 +01:00
glBeginQuery ( GL_ANY_SAMPLES_PASSED , query - > driver_handle ) ;
2017-07-27 18:04:55 +02:00
}
2018-03-05 12:09:43 +01:00
// Ends the currently active occlusion query. The query must have been started
// with begin_occlusion_query (enforced by the verify below).
void GLGSRender::end_occlusion_query(rsx::reports::occlusion_query_info* query)
{
	verify(HERE), query->active;
	glEndQuery(GL_ANY_SAMPLES_PASSED);
}
2018-03-05 12:09:43 +01:00
bool GLGSRender : : check_occlusion_query_status ( rsx : : reports : : occlusion_query_info * query )
2017-07-27 18:04:55 +02:00
{
2018-03-05 12:09:43 +01:00
if ( ! query - > num_draws )
return true ;
2017-11-16 22:52:21 +01:00
GLint status = GL_TRUE ;
2019-12-03 23:34:23 +01:00
glGetQueryObjectiv ( query - > driver_handle , GL_QUERY_RESULT_AVAILABLE , & status ) ;
2017-08-11 22:32:44 +02:00
2017-11-16 22:52:21 +01:00
return status ! = GL_FALSE ;
2017-07-27 18:04:55 +02:00
}
2018-03-05 12:09:43 +01:00
void GLGSRender : : get_occlusion_query_result ( rsx : : reports : : occlusion_query_info * query )
2017-07-27 18:04:55 +02:00
{
2018-03-05 12:09:43 +01:00
if ( query - > num_draws )
{
2018-03-24 10:53:34 +01:00
GLint result = 0 ;
2019-12-03 23:34:23 +01:00
glGetQueryObjectiv ( query - > driver_handle , GL_QUERY_RESULT , & result ) ;
2017-07-27 18:04:55 +02:00
2018-03-05 12:09:43 +01:00
query - > result + = result ;
}
2018-01-17 17:14:00 +01:00
}
2018-03-13 14:34:31 +01:00
void GLGSRender : : discard_occlusion_query ( rsx : : reports : : occlusion_query_info * query )
{
2018-03-24 10:53:34 +01:00
if ( query - > active )
{
//Discard is being called on an active query, close it
glEndQuery ( GL_ANY_SAMPLES_PASSED ) ;
}
2018-03-13 14:34:31 +01:00
}
2018-07-11 22:51:29 +02:00
// Runs on the shader-decompiler worker thread at startup.
void GLGSRender::on_decompiler_init()
{
	// Bind decompiler context to this thread
	m_frame->set_current(m_decompiler_context);
}
// Runs on the shader-decompiler worker thread at shutdown.
void GLGSRender::on_decompiler_exit()
{
	// Cleanup
	m_frame->delete_context(m_decompiler_context);
}
// One iteration of the decompiler worker loop. Processes up to 8 queued
// shader compilation jobs; returns true when work was performed so the
// caller knows whether to continue spinning.
bool GLGSRender::on_decompiler_task()
{
	return m_prog_buffer.async_update(8).first;
}