Hello guys,
I have this query which I need some performance gain, currently it takes 56.634145 seconds to execute 1000 rows. Can it be optimized with JOIN or something else than subqueries ?
I have now updated the post for readability.
SELECT
`id`,
`title`,
`alias`,
(SELECT `data_txt` FROM `data_field` WHERE `data_id` = data.id AND `fieldset_field_id` = '1' ORDER BY `version` DESC LIMIT 1) AS `textfield`,
(SELECT `data_txt` FROM `data_field` WHERE `data_id` = data.id AND `fieldset_field_id` = '2' ORDER BY `version` DESC LIMIT 1) AS `wysiwyg`,
(SELECT `data_txt` FROM `data_field` WHERE `data_id` = data.id AND `fieldset_field_id` = '3' ORDER BY `version` DESC LIMIT 1) AS `selectbox`,
(SELECT `data_txt` FROM `data_field` WHERE `data_id` = data.id AND `fieldset_field_id` = '4' ORDER BY `version` DESC LIMIT 1) AS `file`
FROM
`data`
WHERE
`fieldset_id` = '1'
AND
`storage` = 'collection'
AND
`storage_id` = '1'
AND
`language_id` = 1
AND
`datetime_begin` < NOW()
AND
(`datetime_end` > NOW() OR `datetime_end` = '0000-00-00 00:00:00')
ORDER BY
`index` DESC
The tables and some demo data
CREATE TABLE `data_field` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`data_id` int(11) DEFAULT NULL,
`fieldset_field_id` int(11) DEFAULT NULL,
`data_txt` text,
`version` int(6) NOT NULL DEFAULT '0',
PRIMARY KEY (`id`),
KEY `data_fieldset` (`data_id`,`fieldset_field_id`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 AUTO_INCREMENT=1 ;
CREATE TABLE `data` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`title` varchar(64) DEFAULT NULL,
`alias` varchar(64) DEFAULT NULL,
`fieldset_id` int(11) DEFAULT NULL,
`storage` enum('collection','page') DEFAULT NULL,
`storage_id` int(11) DEFAULT NULL,
`language_id` int(11) DEFAULT NULL,
`datetime_begin` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
`datetime_end` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
`index` int(11) DEFAULT NULL,
`author` int(11) DEFAULT NULL,
`enabled` tinyint(1) DEFAULT '1',
`removed` tinyint(1) DEFAULT '0',
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 AUTO_INCREMENT=1 ;
INSERT INTO `data` (`id` ,`title` ,`alias` ,`fieldset_id` ,`storage` ,`storage_id` ,`language_id` ,`datetime_begin` ,`datetime_end` ,`index` ,`author` ,`enabled` ,`removed`) VALUES (NULL , 'Demo row', 'demo-row', '1', 'collection', '1', '1', '0000-00-0000:00:00', '0000-00-00 00:00:00', '0', '1', '1', '0');
INSERT INTO `data` (`id` ,`title` ,`alias` ,`fieldset_id` ,`storage` ,`storage_id` ,`language_id` ,`datetime_begin` ,`datetime_end` ,`index` ,`author` ,`enabled` ,`removed`) VALUES (NULL , 'Demo row', 'demo-row', '1', 'collection', '1', '1', '0000-00-0000:00:00', '0000-00-00 00:00:00', '0', '1', '1', '0');
INSERT INTO `data_field` (`id`, `data_id`, `fieldset_field_id`, `data_txt`, `version`) VALUES
(1, 1, 1, 'Porttitor egestas risus aliquam praesent vestibulum proin id varius urna egestas vestibulum ac adipiscing est', 0),
(2, 1, 2, 'Lorem ipsum', 0),
(3, 1, 3, '["foo","bar","baz"]', 0),
(4, 1, 3, 'Aliquet.jpg', 0),
(5, 2, 1, 'Sollicitudin turpis vehicula fringilla sem sem sit ut nulla mi id lobortis metus vel leo', 0),
(6, 2, 2, 'Lorem ipsum', 0),
(7, 2, 3, '["foo","bar","baz"]', 0);
---
Edit, I put index on two columns, it speeded up alot, went to 0.936033 per 1000 rows.
but still, can it be optimized with joins ?
---
Edit more: Updated readability.